Dataset schema (column name, type, and the value-length / value range shown by the viewer):

entry_point                  string   lengths 1 to 65
original_triton_python_code  string   lengths 208 to 619k
optimised_triton_code        string   lengths 1.15k to 275k
repo_name                    string   lengths 7 to 115
module_name                  string   lengths 1 to 65
synthetic                    bool     1 class
uuid                         int64    0 to 18.5k
licenses                     list     lengths 1 to 6
stars                        int64    0 to 19.8k
sha                          string   length 40
repo_link                    string   lengths 72 to 180

The records below list these fields in schema order.
entry_point: ScaledL2Norm

original_triton_python_code:

import torch
import torch.onnx
import torch
import torch.nn as nn
import torch.nn.functional as F


class ScaledL2Norm(nn.Module):

    def __init__(self, in_channels, initial_scale):
        super(ScaledL2Norm, self).__init__()
        self.in_channels = in_channels
        self.scale = nn.Parameter(torch.Tensor(in_channels))
        self.initial_scale = initial_scale
        self.reset_parameters()

    def forward(self, x):
        return F.normalize(x, p=2, dim=1) * self.scale.unsqueeze(0).unsqueeze(2).unsqueeze(3)

    def reset_parameters(self):
        self.scale.data.fill_(self.initial_scale)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'initial_scale': 1.0}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tmp17 = tmp15 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
    return buf0, primals_1


class ScaledL2NormNew(nn.Module):

    def __init__(self, in_channels, initial_scale):
        super(ScaledL2NormNew, self).__init__()
        self.in_channels = in_channels
        self.scale = nn.Parameter(torch.Tensor(in_channels))
        self.initial_scale = initial_scale
        self.reset_parameters()

    def reset_parameters(self):
        self.scale.data.fill_(self.initial_scale)

    def forward(self, input_0):
        primals_2 = self.scale
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]

repo_name: SoonminHwang/pytorch-ssd
module_name: ScaledL2Norm
synthetic: false
uuid: 9498
licenses: ["MIT"]
stars: 0
sha: 1d6b9427a4b649bc2ce85a82511b9dd299f9d3e8
repo_link: https://github.com/SoonminHwang/pytorch-ssd/tree/1d6b9427a4b649bc2ce85a82511b9dd299f9d3e8
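A quick equivalence check for this row; a minimal sketch, not part of the dataset record, assuming a CUDA device and that ScaledL2Norm and ScaledL2NormNew from the two code fields above are in scope:

import torch

# Hypothetical harness: eager module vs. the Inductor-generated wrapper.
# F.normalize(..., p=2, dim=1) uses eps=1e-12, which matches the
# triton_helpers.maximum(sqrt(...), 1e-12) clamp inside the fused kernel.
ref = ScaledL2Norm(in_channels=4, initial_scale=1.0).cuda()
opt = ScaledL2NormNew(in_channels=4, initial_scale=1.0).cuda()
x = torch.rand([4, 4, 4, 4], device='cuda')  # shape from get_inputs()
torch.testing.assert_close(opt(x), ref(x))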
entry_point: Discriminator2

original_triton_python_code:

import torch
import torch.utils.data
import torch.nn as nn


class Discriminator2(nn.Module):

    def __init__(self, n_h):
        super(Discriminator2, self).__init__()
        self.f_k = nn.Bilinear(n_h, n_h, 1)
        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        if isinstance(m, nn.Bilinear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
        c_x = c
        sc_1 = torch.squeeze(self.f_k(h_pl, c_x), 2)
        sc_2 = torch.squeeze(self.f_k(h_mi, c_x), 2)
        if s_bias1 is not None:
            sc_1 += s_bias1
        if s_bias2 is not None:
            sc_2 += s_bias2
        logits = torch.cat((sc_1, sc_2), 1)
        return logits


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_h': 4}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 8
    x0 = xindex % 4
    x2 = xindex // 32
    x3 = xindex
    tmp6 = tl.load(in_ptr1 + 0)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
    tmp8 = tmp5 + tmp7
    tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
    tmp10 = tl.where(tmp4, tmp8, tmp9)
    tmp11 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp14 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp11 & xmask, other=0.0)
    tmp15 = tmp14 + tmp7
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp11, tmp15, tmp16)
    tmp18 = tl.where(tmp4, tmp10, tmp17)
    tl.store(out_ptr0 + x3, tmp18, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
        buf1 = buf0
        del buf0
        buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
        del primals_2
        buf3 = buf2
        del buf2
        buf4 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(128)](buf1, primals_3, buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del buf3
        del primals_3
    return buf4, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0)


class Discriminator2New(nn.Module):

    def __init__(self, n_h):
        super(Discriminator2New, self).__init__()
        self.f_k = nn.Bilinear(n_h, n_h, 1)
        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        if isinstance(m, nn.Bilinear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.f_k.weight
        primals_3 = self.f_k.bias
        primals_1 = input_0
        primals_4 = input_1
        primals_5 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]

repo_name: XrosLiang/GraphCL
module_name: Discriminator2
synthetic: false
uuid: 5996
licenses: ["MIT"]
stars: 1
sha: fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c
repo_link: https://github.com/XrosLiang/GraphCL/tree/fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c
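Both classes draw fresh Xavier-initialised Bilinear weights at construction, so a comparison has to sync state first; a minimal sketch under the same assumptions (CUDA device, both classes above in scope):

import torch

# Hypothetical harness: align parameters, then compare eager vs. compiled.
ref = Discriminator2(n_h=4).cuda()
opt = Discriminator2New(n_h=4).cuda()
opt.load_state_dict(ref.state_dict())  # both wrap the same nn.Bilinear
c, h_pl, h_mi = (torch.rand([4, 4, 4, 4], device='cuda') for _ in range(3))
torch.testing.assert_close(opt(c, h_pl, h_mi), ref(c, h_pl, h_mi))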
entry_point: TorchClampOptionMax

original_triton_python_code:

import torch


class TorchClampOptionMax(torch.nn.Module):

    def forward(self, x):
        return torch.clamp(x, max=0.1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.1
    tmp2 = triton_helpers.minimum(tmp0, tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class TorchClampOptionMaxNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]

repo_name: NVIDIA-AI-IOT-private/torch2trt
module_name: TorchClampOptionMax
synthetic: false
uuid: 10544
licenses: ["MIT"]
stars: 0
sha: 953d60039e0c81e90eea467c3df2e6e3f7040242
repo_link: https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
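The fused kernel is a single elementwise minimum against the constant 0.1, so the check reduces to torch.clamp itself; a minimal sketch assuming a CUDA device and TorchClampOptionMaxNew in scope:

import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
y = TorchClampOptionMaxNew()(x)
torch.testing.assert_close(y, torch.clamp(x, max=0.1))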
entry_point: Ones

original_triton_python_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/a6/ca6cipguchfnp6u3kr7cqimnoh3ofbrs5j5govcb5fwb4udwfabz.py
# Topologically Sorted Source Nodes: [ones], Original ATen: [aten.ones]
# Source node to ATen node mapping:
#   ones => full
# Graph fragment:
#   %full : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_ones_0 = async_compile.triton('triton_poi_fused_ones_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ones_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ones_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 1.0
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [ones], Original ATen: [aten.ones]
        stream0 = get_raw_stream(0)
        triton_poi_fused_ones_0.run(buf0, 64, grid=grid(64), stream=stream0)
    return (reinterpret_tensor(buf0, (4, 4, 4, 4), (0, 16, 4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.optim

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_ones_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 1.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_ones_0[grid(64)](buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return reinterpret_tensor(buf0, (4, 4, 4, 4), (0, 16, 4, 1), 0),


class OnesNew(torch.nn.Module):

    def __init__(self):
        super(OnesNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]

repo_name: ai-in-motion/moai
module_name: Ones
synthetic: false
uuid: 18341
licenses: ["Apache-2.0"]
stars: 10
sha: e38cac046c059d2e2331ef4883bbabc5a500a5cf
repo_link: https://github.com/ai-in-motion/moai/tree/e38cac046c059d2e2331ef4883bbabc5a500a5cf
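The generated call() ignores the input's values entirely: it fills a (1, 4, 4, 4) buffer with 1.0 and returns it reinterpreted with stride 0 on dim 0, i.e. a broadcast view. A minimal sketch assuming a CUDA device and OnesNew in scope:

import torch

out = OnesNew()(torch.rand([4, 4, 4, 4], device='cuda'))
# All four batch slices alias the same storage (stride 0 on dim 0),
# but the values are exactly those of torch.ones.
torch.testing.assert_close(out, torch.ones(4, 4, 4, 4, device='cuda'))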
entry_point: FocalLoss

original_triton_python_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/z4/cz4rdmnjzva3wxtwkxdq32ntpuxr4xa3itqmggsv52y455k5rfxs.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, alpha, eq, pred, sub_1, pt, sub_2, pow_1, mul_2, ce, mul_3], Original ATen: [aten.mul, aten.rsub, aten.add, aten.eq, aten.sigmoid, aten.where, aten.pow, aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
#   alpha => add
#   ce => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
#   eq => eq
#   mul => mul_1
#   mul_1 => mul_2
#   mul_2 => mul_3
#   mul_3 => mul_4
#   pow_1 => pow_1
#   pred => sigmoid
#   pt => where
#   sub => sub_3
#   sub_1 => sub_4
#   sub_2 => sub_5
# Graph fragment:
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 0.75), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
#   %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 1), kwargs = {})
#   %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
#   %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %sigmoid, %sub_4), kwargs = {})
#   %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %where), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_5, 2), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %pow_1), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_2), kwargs = {})
triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0 = async_compile.triton('triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp9 = tl.load(in_ptr1 + (x0), xmask)
    tmp1 = 0.25
    tmp2 = tmp0 * tmp1
    tmp3 = 1.0
    tmp4 = tmp3 - tmp0
    tmp5 = 0.75
    tmp6 = tmp4 * tmp5
    tmp7 = tmp2 + tmp6
    tmp8 = tmp0 == tmp3
    tmp10 = tl.sigmoid(tmp9)
    tmp11 = tmp3 - tmp10
    tmp12 = tl.where(tmp8, tmp10, tmp11)
    tmp13 = tmp3 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tmp7 * tmp14
    tmp16 = tmp4 * tmp9
    tmp17 = 0.0
    tmp18 = triton_helpers.minimum(tmp17, tmp9)
    tmp19 = tl_math.abs(tmp9)
    tmp20 = -tmp19
    tmp21 = tl_math.exp(tmp20)
    tmp22 = libdevice.log1p(tmp21)
    tmp23 = tmp18 - tmp22
    tmp24 = tmp16 - tmp23
    tmp25 = tmp15 * tmp24
    tl.store(out_ptr0 + (x0), tmp25, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul, sub, mul_1, alpha, eq, pred, sub_1, pt, sub_2, pow_1, mul_2, ce, mul_3], Original ATen: [aten.mul, aten.rsub, aten.add, aten.eq, aten.sigmoid, aten.where, aten.pow, aten.binary_cross_entropy_with_logits]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.cuda
import torch.distributed
import torch.multiprocessing

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp9 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = 0.25
    tmp2 = tmp0 * tmp1
    tmp3 = 1.0
    tmp4 = tmp3 - tmp0
    tmp5 = 0.75
    tmp6 = tmp4 * tmp5
    tmp7 = tmp2 + tmp6
    tmp8 = tmp0 == tmp3
    tmp10 = tl.sigmoid(tmp9)
    tmp11 = tmp3 - tmp10
    tmp12 = tl.where(tmp8, tmp10, tmp11)
    tmp13 = tmp3 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tmp7 * tmp14
    tmp16 = tmp4 * tmp9
    tmp17 = 0.0
    tmp18 = triton_helpers.minimum(tmp17, tmp9)
    tmp19 = tl_math.abs(tmp9)
    tmp20 = -tmp19
    tmp21 = tl_math.exp(tmp20)
    tmp22 = libdevice.log1p(tmp21)
    tmp23 = tmp18 - tmp22
    tmp24 = tmp16 - tmp23
    tmp25 = tmp15 * tmp24
    tl.store(out_ptr0 + x0, tmp25, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_binary_cross_entropy_with_logits_eq_mul_pow_rsub_sigmoid_where_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class FocalLossNew(nn.Module):
    """Focal Loss - https://arxiv.org/abs/1708.02002"""

    def __init__(self, alpha=0.25, gamma=2):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]

repo_name: krisk84/retinanet-examples
module_name: FocalLoss
synthetic: false
uuid: 12685
licenses: ["BSD-3-Clause"]
stars: 0
sha: 174d95f3aabe1746d105c66f87aa445607f4eab8
repo_link: https://github.com/krisk84/retinanet-examples/tree/174d95f3aabe1746d105c66f87aa445607f4eab8
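The ATen graph hardwires alpha=0.25 and gamma=2, and the fused kernel reads its first pointer (input_1) as the target and the second (input_0) as the logits. An eager reference reconstructed from that graph follows; this is a sketch, not the repo's source, assuming a CUDA device and FocalLossNew in scope:

import torch
import torch.nn.functional as F

def focal_loss_ref(logits, targets, alpha=0.25, gamma=2):
    # Per-element focal loss as encoded in the fused kernel above.
    pred = torch.sigmoid(logits)
    pt = torch.where(targets == 1, pred, 1 - pred)
    at = alpha * targets + (1 - alpha) * (1 - targets)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    return at * (1 - pt) ** gamma * ce

logits = torch.randn([4, 4, 4, 4], device='cuda')
targets = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(FocalLossNew()(logits, targets),
                           focal_loss_ref(logits, targets))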
entry_point: GE2ELoss

original_triton_python_code:

import torch
import torch.nn.functional as F
import torch.nn as nn


def calc_loss(sim_matrix):
    same_idx = list(range(sim_matrix.size(0)))
    pos = sim_matrix[same_idx, :, same_idx]
    neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-06).log_()
    per_embedding_loss = -1 * (pos - neg)
    loss = per_embedding_loss.sum()
    return loss, per_embedding_loss


def get_centroids(embeddings):
    centroids = embeddings.mean(dim=1)
    return centroids


def get_utterance_centroids(embeddings):
    """
    Returns the centroids for each utterance of a speaker, where the
    utterance centroid is the speaker centroid without considering this
    utterance

    Shape of embeddings should be:
        (speaker_ct, utterance_per_speaker_ct, embedding_size)
    """
    sum_centroids = embeddings.sum(dim=1)
    sum_centroids = sum_centroids.reshape(sum_centroids.shape[0], 1, sum_centroids.shape[-1])
    num_utterances = embeddings.shape[1] - 1
    centroids = (sum_centroids - embeddings) / num_utterances
    return centroids


def get_cossim(embeddings, centroids):
    num_utterances = embeddings.shape[1]
    utterance_centroids = get_utterance_centroids(embeddings)
    utterance_centroids_flat = utterance_centroids.view(utterance_centroids.shape[0] * utterance_centroids.shape[1], -1)
    embeddings_flat = embeddings.view(embeddings.shape[0] * num_utterances, -1)
    cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat)
    centroids_expand = centroids.repeat((num_utterances * embeddings.shape[0], 1))
    embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings.shape[0], 1)
    embeddings_expand = embeddings_expand.view(embeddings_expand.shape[0] * embeddings_expand.shape[1], embeddings_expand.shape[-1])
    cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand)
    cos_diff = cos_diff.view(embeddings.size(0), num_utterances, centroids.size(0))
    same_idx = list(range(embeddings.size(0)))
    cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0], num_utterances)
    cos_diff = cos_diff + 1e-06
    return cos_diff


class GE2ELoss(nn.Module):

    def __init__(self, device):
        super(GE2ELoss, self).__init__()
        self.w = nn.Parameter(torch.tensor(10.0), requires_grad=True)
        self.b = nn.Parameter(torch.tensor(-5.0), requires_grad=True)
        self.device = device

    def forward(self, embeddings):
        torch.clamp(self.w, 1e-06)
        centroids = get_centroids(embeddings)
        cossim = get_cossim(embeddings, centroids)
        sim_matrix = self.w * cossim + self.b
        loss, _ = calc_loss(sim_matrix)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'device': 0}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + x3, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 - tmp7
    tmp9 = 0.3333333333333333
    tmp10 = tmp8 * tmp9
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + x2, xmask)
    tmp17 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-08
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tmp18 = tmp17 * tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = libdevice.sqrt(tmp27)
    tmp29 = triton_helpers.maximum(tmp28, tmp13)
    tmp30 = tmp16 / tmp29
    tmp31 = tmp15 * tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)


@triton.jit
def triton_poi_fused_linalg_vector_norm_mean_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * (x0 % 4), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (12 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (1 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (5 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (9 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (13 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (2 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (6 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (10 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (14 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (3 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr0 + (7 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (11 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr0 + (15 + 16 * (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp8 * tmp8
    tmp12 = tmp10 + tmp11
    tmp14 = tmp12 + tmp13
    tmp16 = tmp14 + tmp15
    tmp17 = tmp16 / tmp7
    tmp18 = tmp17 * tmp17
    tmp19 = tmp9 + tmp18
    tmp22 = tmp20 + tmp21
    tmp24 = tmp22 + tmp23
    tmp26 = tmp24 + tmp25
    tmp27 = tmp26 / tmp7
    tmp28 = tmp27 * tmp27
    tmp29 = tmp19 + tmp28
    tmp32 = tmp30 + tmp31
    tmp34 = tmp32 + tmp33
    tmp36 = tmp34 + tmp35
    tmp37 = tmp36 / tmp7
    tmp38 = tmp37 * tmp37
    tmp39 = tmp29 + tmp38
    tl.store(out_ptr0 + x0, tmp39, xmask)


@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask)
    tmp1 = tl.load(in_ptr0 + 4 * (x1 // 4), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * (x1 // 4)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * (x1 // 4)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * (x1 // 4)), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (x0 + 16 * (x1 % 4)), xmask)
    tmp17 = tl.load(in_ptr0 + (4 + x0 + 16 * (x1 % 4)), xmask)
    tmp19 = tl.load(in_ptr0 + (8 + x0 + 16 * (x1 % 4)), xmask)
    tmp21 = tl.load(in_ptr0 + (12 + x0 + 16 * (x1 % 4)), xmask)
    tmp25 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-08
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tmp18 = tmp16 + tmp17
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp23 = 4.0
    tmp24 = tmp22 / tmp23
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = triton_helpers.maximum(tmp26, tmp13)
    tmp28 = tmp24 / tmp27
    tmp29 = tmp15 * tmp28
    tl.store(out_ptr0 + x2, tmp29, xmask)


@triton.jit
def triton_poi_fused_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_index_put_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    x0 = xindex % 4
    tmp11 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 2, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 0, tl.int64)
    tmp6 = tl.where(tmp4, tmp5, tmp3)
    tmp7 = tl.full([1], 3, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.where(tmp8, tmp1, tmp7)
    tmp10 = tl.where(tmp2, tmp6, tmp9)
    tmp13 = tmp11 + tmp12
    tmp15 = tmp13 + tmp14
    tmp17 = tmp15 + tmp16
    tl.store(out_ptr0 + (4 * x0 + 17 * tmp10), tmp17, xmask)


@triton.jit
def triton_per_fused_add_exp_index_log_mul_sub_sum_6(in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex // 4
    r0 = rindex % 4
    r2 = rindex
    tmp11 = tl.load(in_ptr0 + 0)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp17 = tl.load(in_ptr2 + 0)
    tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
    tmp20 = tl.load(in_ptr1 + 4 * r2, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + (1 + 4 * r2), None, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr1 + (2 + 4 * r2), None, eviction_policy='evict_last')
    tmp37 = tl.load(in_ptr1 + (3 + 4 * r2), None, eviction_policy='evict_last')
    tmp0 = r1
    tmp1 = tl.full([1, 1], 2, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1, 1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1, 1], 0, tl.int64)
    tmp6 = tl.where(tmp4, tmp5, tmp3)
    tmp7 = tl.full([1, 1], 3, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.where(tmp8, tmp1, tmp7)
    tmp10 = tl.where(tmp2, tmp6, tmp9)
    tmp13 = tl.load(in_ptr1 + (4 * r0 + 17 * tmp10), None, eviction_policy='evict_last')
    tmp14 = 1e-06
    tmp15 = tmp13 + tmp14
    tmp16 = tmp12 * tmp15
    tmp19 = tmp16 + tmp18
    tmp21 = tmp20 + tmp14
    tmp22 = tmp12 * tmp21
    tmp23 = tmp22 + tmp18
    tmp24 = tl_math.exp(tmp23)
    tmp26 = tmp25 + tmp14
    tmp27 = tmp12 * tmp26
    tmp28 = tmp27 + tmp18
    tmp29 = tl_math.exp(tmp28)
    tmp30 = tmp24 + tmp29
    tmp32 = tmp31 + tmp14
    tmp33 = tmp12 * tmp32
    tmp34 = tmp33 + tmp18
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp30 + tmp35
    tmp38 = tmp37 + tmp14
    tmp39 = tmp12 * tmp38
    tmp40 = tmp39 + tmp18
    tmp41 = tl_math.exp(tmp40)
    tmp42 = tmp36 + tmp41
    tmp43 = tmp42 + tmp14
    tmp44 = tl_math.log(tmp43)
    tmp45 = tmp19 - tmp44
    tmp46 = -1.0
    tmp47 = tmp45 * tmp46
    tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
    tmp50 = tl.sum(tmp48, 1)[:, None]
    tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp50, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (), ())
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (), ())
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sub_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1[grid(64)](primals_2, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = reinterpret_tensor(buf0, (64, 1), (1, 64), 0)
        del buf0
        triton_poi_fused_linalg_vector_norm_mean_repeat_2[grid(64)](primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3[grid(256)](primals_2, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf4 = reinterpret_tensor(buf2, (64,), (1,), 0)
        del buf2
        triton_poi_fused_sum_4[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf3
        triton_poi_fused_index_put_5[grid(16)](buf1, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf1
        buf7 = empty_strided_cuda((), (), torch.float32)
        triton_per_fused_add_exp_index_log_mul_sub_sum_6[grid(1)](primals_1, buf4, primals_3, buf7, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
    return buf7, primals_1, primals_3, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)


def calc_loss(sim_matrix):
    same_idx = list(range(sim_matrix.size(0)))
    pos = sim_matrix[same_idx, :, same_idx]
    neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-06).log_()
    per_embedding_loss = -1 * (pos - neg)
    loss = per_embedding_loss.sum()
    return loss, per_embedding_loss


def get_centroids(embeddings):
    centroids = embeddings.mean(dim=1)
    return centroids


def get_utterance_centroids(embeddings):
    """
    Returns the centroids for each utterance of a speaker, where the
    utterance centroid is the speaker centroid without considering this
    utterance

    Shape of embeddings should be:
        (speaker_ct, utterance_per_speaker_ct, embedding_size)
    """
    sum_centroids = embeddings.sum(dim=1)
    sum_centroids = sum_centroids.reshape(sum_centroids.shape[0], 1, sum_centroids.shape[-1])
    num_utterances = embeddings.shape[1] - 1
    centroids = (sum_centroids - embeddings) / num_utterances
    return centroids


def get_cossim(embeddings, centroids):
    num_utterances = embeddings.shape[1]
    utterance_centroids = get_utterance_centroids(embeddings)
    utterance_centroids_flat = utterance_centroids.view(utterance_centroids.shape[0] * utterance_centroids.shape[1], -1)
    embeddings_flat = embeddings.view(embeddings.shape[0] * num_utterances, -1)
    cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat)
    centroids_expand = centroids.repeat((num_utterances * embeddings.shape[0], 1))
    embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings.shape[0], 1)
    embeddings_expand = embeddings_expand.view(embeddings_expand.shape[0] * embeddings_expand.shape[1], embeddings_expand.shape[-1])
    cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand)
    cos_diff = cos_diff.view(embeddings.size(0), num_utterances, centroids.size(0))
    same_idx = list(range(embeddings.size(0)))
    cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0], num_utterances)
    cos_diff = cos_diff + 1e-06
    return cos_diff


class GE2ELossNew(nn.Module):

    def __init__(self, device):
        super(GE2ELossNew, self).__init__()
        self.w = nn.Parameter(torch.tensor(10.0), requires_grad=True)
        self.b = nn.Parameter(torch.tensor(-5.0), requires_grad=True)
        self.device = device

    def forward(self, input_0):
        primals_1 = self.w
        primals_3 = self.b
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]

repo_name: dodo0822/PyTorch_Speaker_Verification
module_name: GE2ELoss
synthetic: false
uuid: 3447
licenses: ["BSD-3-Clause"]
stars: 0
sha: 5310f441894e77895de27380d31149629e309d0f
repo_link: https://github.com/dodo0822/PyTorch_Speaker_Verification/tree/5310f441894e77895de27380d31149629e309d0f
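Since w and b initialise deterministically (10.0 and -5.0), no state copying is needed to compare the two; a minimal sketch assuming a CUDA device and both classes above in scope:

import torch

ref = GE2ELoss(device=0).cuda()
opt = GE2ELossNew(device=0).cuda()
emb = torch.rand([4, 4, 4], device='cuda')  # (speakers, utterances, dim)
torch.testing.assert_close(opt(emb), ref(emb))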
entry_point: AttentionLayer

original_triton_python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class AttentionLayer(nn.Module):

    def __init__(self, hidden_size):
        super(AttentionLayer, self).__init__()
        self.hidden_size = hidden_size

    def dot_product_attention(self, hidden, encoder_output):
        return torch.sum(hidden * encoder_output, dim=2)

    def forward(self, hidden, encoded_output):
        energies = self.dot_product_attention(hidden, encoded_output)
        energies = energies.t()
        return F.softmax(energies, dim=1).unsqueeze(1)


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + x2, tmp14, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tmp1 = tl.load(in_ptr0 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + y0), ymask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + y0), ymask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + y0), ymask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp8, xmask & ymask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(16)](arg0_1, arg1_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
        triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
        del buf0
        triton_poi_fused__softmax_2[grid(4, 4)](buf1, buf2, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
        del buf1
    return reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0),


class AttentionLayerNew(nn.Module):

    def __init__(self, hidden_size):
        super(AttentionLayerNew, self).__init__()
        self.hidden_size = hidden_size

    def dot_product_attention(self, hidden, encoder_output):
        return torch.sum(hidden * encoder_output, dim=2)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]

repo_name: u7javed/AI-Chatbot
module_name: AttentionLayer
synthetic: false
uuid: 10873
licenses: ["MIT"]
stars: 0
sha: d86916537e7b0b9a45f11d0fe0367fe9f66721e7
repo_link: https://github.com/u7javed/AI-Chatbot/tree/d86916537e7b0b9a45f11d0fe0367fe9f66721e7
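The layer is parameter-free, so the three softmax kernels can be checked directly against the eager path; a minimal sketch assuming a CUDA device and both classes above in scope:

import torch

ref = AttentionLayer(hidden_size=4)
opt = AttentionLayerNew(hidden_size=4)
hidden = torch.rand([4, 4, 4], device='cuda')
encoded = torch.rand([4, 4], device='cuda')  # broadcasts against hidden
torch.testing.assert_close(opt(hidden, encoded), ref(hidden, encoded))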
entry_point: L2Norm

original_triton_python_code:

import torch
import torch.nn as nn


class L2Norm(nn.Module):
    """
    Scale shall be learnable according to original paper
    scale: initial scale number
    chan_num: L2Norm channel number (norm over all channels)
    """

    def __init__(self, scale=20, chan_num=512):
        super(L2Norm, self).__init__()
        self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1, chan_num, 1, 1))

    def forward(self, data):
        return self.scale * data * data.pow(2).sum(dim=1, keepdim=True).clamp(min=1e-12).rsqrt()


def get_inputs():
    return [torch.rand([4, 512, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_red_fused_pow_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 256
    rnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 16
    x1 = xindex // 16
    _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    x3 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 2048 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp1 = tmp0 * tmp0
        tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
        tmp4 = _tmp3 + tmp2
        _tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
    tmp3 = tl.sum(_tmp3, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp3, xmask)


@triton.jit
def triton_per_fused_clamp_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 16
    x1 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 1e-12
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = libdevice.rsqrt(tmp6)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x3, tmp7, xmask)


@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 16 % 512
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 8192
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, None)
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x3, tmp4, None)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_2, (4, 512, 4, 4), (8192, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_red_fused_pow_sum_0[grid(256)](primals_2, buf0, 256, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
        buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0)
        del buf1
        triton_per_fused_clamp_pow_rsqrt_sum_1[grid(64)](buf2, buf0, 64, 4, XBLOCK=64, num_warps=2, num_stages=1)
        del buf0
        buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_2[grid(32768)](primals_1, primals_2, buf2, buf3, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
    return buf3, primals_2, buf2


class L2NormNew(nn.Module):
    """
    Scale shall be learnable according to original paper
    scale: initial scale number
    chan_num: L2Norm channel number (norm over all channels)
    """

    def __init__(self, scale=20, chan_num=512):
        super(L2NormNew, self).__init__()
        self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1, chan_num, 1, 1))

    def forward(self, input_0):
        primals_1 = self.scale
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]

repo_name: EldritchJS/inference_results_v0.5
module_name: L2Norm
synthetic: false
uuid: 409
licenses: ["Apache-2.0"]
stars: 0
sha: 5552490e184d9fc342d871fcc410ac423ea49053
repo_link: https://github.com/EldritchJS/inference_results_v0.5/tree/5552490e184d9fc342d871fcc410ac423ea49053
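The scale parameter initialises deterministically to 20 for all 512 channels, so no state sync is needed here either; a minimal sketch assuming a CUDA device and both classes above in scope:

import torch

ref = L2Norm().cuda()
opt = L2NormNew().cuda()
x = torch.rand([4, 512, 4, 4], device='cuda')
torch.testing.assert_close(opt(x), ref(x))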
entry_point: SpatialGate

original_triton_python_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/uc/cucdaa5tqnxykdmw5yqh7ir5ac35phopjcobljrg4rrtlnfjtuwd.py
# Topologically Sorted Source Nodes: [x_compress], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   x_compress => cat
# Graph fragment:
#   %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 16) % 2
    x0 = xindex % 16
    x2 = (xindex // 32)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp9 = triton_helpers.maximum(tmp7, tmp8)
    tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp11 = triton_helpers.maximum(tmp9, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tmp15 = tl.full([1], 2, tl.int64)
    tmp16 = tmp0 < tmp15
    tmp17 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
    tmp18 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
    tmp19 = tmp17 + tmp18
    tmp20 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
    tmp21 = tmp19 + tmp20
    tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
    tmp23 = tmp21 + tmp22
    tmp24 = 4.0
    tmp25 = tmp23 / tmp24
    tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
    tmp27 = tl.where(tmp14, tmp25, tmp26)
    tmp28 = tl.where(tmp4, tmp13, tmp27)
    tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/hy/chyq5lqryq6qebxlfhdffupuca4px672fxychf3hng5vgomdaota.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   x => convolution
# Graph fragment:
#   %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/k4/ck42otglczd2qtk6fovmlu2yv7bzgywgiadyjygqbvu4m2rftbjm.py
# Topologically Sorted Source Nodes: [scale, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
#   mul => mul
#   scale => sigmoid
# Graph fragment:
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
    assert_size_stride(primals_3, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_compress], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
        del primals_3
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [scale, mul], Original ATen: [aten.sigmoid, aten.mul]
        triton_poi_fused_mul_sigmoid_2.run(primals_1, buf2, buf3, 256, grid=grid(256), stream=stream0)
    return (buf3, primals_1, primals_2, buf0, buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1, 2, 7, 7), (98, 49, 7, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.model_zoo assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 + tmp20 tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 + tmp22 tmp24 = 4.0 tmp25 = tmp23 / tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp14, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp13, tmp27) tl.store(out_ptr0 + x3, tmp28, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 
1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_2[grid(256)](primals_1, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, buf0, buf2 class BasicConv(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=False, bias=True): super(BasicConv, self).__init__() self.out_channels = out_planes self.conv = nn.Conv2d(in_planes, out_planes, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.bn = nn.BatchNorm2d(out_planes, eps=1e-05, momentum=0.01, affine=True) if bn else None self.relu = nn.ReLU() if relu else None def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.relu is not None: x = self.relu(x) return x class ChannelPool(nn.Module): def forward(self, x): return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1) .unsqueeze(1)), dim=1) class SpatialGateNew(nn.Module): def __init__(self): super(SpatialGateNew, self).__init__() kernel_size = 7 self.compress = ChannelPool() self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=( kernel_size - 1) // 2, relu=False) def forward(self, input_0): primals_2 = self.spatial.conv.weight primals_3 = self.spatial.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
HolmesShuan/OISR-PyTorch
SpatialGate
false
13,793
[ "BSD-2-Clause" ]
141
bbe0c88f71fe565a2842df7971b62a9bc5a56c48
https://github.com/HolmesShuan/OISR-PyTorch/tree/bbe0c88f71fe565a2842df7971b62a9bc5a56c48
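A minimal smoke-test sketch for the SpatialGate record above. It assumes the classes from the optimised code (SpatialGateNew and its helpers) are in scope and a CUDA device is available; the (4, 4, 4, 4) input shape comes from the asserts inside call(), everything else is illustrative.

import torch

# Instantiate the Triton-backed gate and move its conv parameters to the GPU,
# since the generated kernels and the extern convolution run on CUDA only.
gate = SpatialGateNew().cuda()
x = torch.rand(4, 4, 4, 4, device="cuda")
y = gate(x)
# The gate multiplies x by a sigmoid attention map, so the shape is preserved
# and every output magnitude is bounded by the corresponding input magnitude.
assert y.shape == (4, 4, 4, 4)
assert torch.all(y.abs() <= x.abs())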
MSELoss
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.nn.functional as F class MSELoss(nn.Module): def __init__(self, ratio=1, size_average=None, reduce=None, reduction= 'mean'): super(MSELoss, self).__init__() self.ratio = ratio self.size_average = size_average self.reduce = reduce self.reduction = reduction def forward(self, input, target, avg_factor=None): return self.ratio * F.mse_loss(input, target, reduction=self.reduction) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MSELossNew(nn.Module): def __init__(self, ratio=1, size_average=None, reduce=None, reduction= 'mean'): super(MSELossNew, self).__init__() self.ratio = ratio self.size_average = size_average self.reduce = reduce self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Dogacel/mmfashion
MSELoss
false
11,413
[ "Apache-2.0" ]
0
e49613245c8501042edd7aeeaa8fb93e5ea13238
https://github.com/Dogacel/mmfashion/tree/e49613245c8501042edd7aeeaa8fb93e5ea13238
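A sketch comparing the eager and optimised losses from the record above; it assumes MSELoss, MSELossNew and get_inputs are in scope and a CUDA device is available (the tolerance is an illustrative choice).

import torch

a, b = [t.cuda() for t in get_inputs()]
ref = MSELoss()     # eager: ratio * F.mse_loss(a, b)
opt = MSELossNew()  # Triton: subtract, square and mean fused into one reduction kernel
assert torch.allclose(ref(a, b), opt(a, b), atol=1e-6)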
Concat
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/m6/cm6nixhuxyf76p5x5wtskqwfrb5kdl2bfda4t34yltftpilyxauy.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %arg1_1],), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 64) x0 = xindex % 64 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x1)), tmp4 & xmask, other=0.0) tmp6 = 
tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (64*((-4) + x1))), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, arg1_1, buf0, 512, grid=grid(512), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 x0 = xindex % 64 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 64 * (-4 + x1)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class ConcatNew(torch.nn.Module): """ Concat module for a functional concat""" def __init__(self, axis: 'int'=0): super(ConcatNew, self).__init__() self.axis = axis def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
arjunsuresh/aimet
Concat
false
12,326
[ "BSD-3-Clause" ]
0
f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
https://github.com/arjunsuresh/aimet/tree/f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
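A sketch exercising the fused concatenation from the record above; it assumes ConcatNew is in scope and a CUDA device is available. Note that the generated kernel hard-codes dim 0 and two (4, 4, 4, 4) inputs, regardless of the axis argument.

import torch

a = torch.rand(4, 4, 4, 4, device="cuda")
b = torch.rand(4, 4, 4, 4, device="cuda")
out = ConcatNew(axis=0)(a, b)
# The kernel writes a into the first four batch slices and b into the rest,
# so the result matches torch.cat exactly (it is a pure copy).
assert out.shape == (8, 4, 4, 4)
assert torch.equal(out, torch.cat((a, b), dim=0))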
SpatialGatherModule
import torch import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization class SpatialGatherModule(nn.Module): """Aggregate the context features according to the initial predicted probability distribution. Employ the soft-weighted method to aggregate the context. """ def __init__(self, scale): super(SpatialGatherModule, self).__init__() self.scale = scale def forward(self, feats, probs): """Forward function.""" batch_size, num_classes, _height, _width = probs.size() channels = feats.size(1) probs = probs.view(batch_size, num_classes, -1) feats = feats.view(batch_size, channels, -1) feats = feats.permute(0, 2, 1) probs = F.softmax(self.scale * probs, dim=2) ocr_context = torch.matmul(probs, feats) ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3) return ocr_context def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'scale': 1.0}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp9 / tmp13 tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(16)](arg0_1, buf2, 16, 16, XBLOCK= 1, num_warps=2, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf3) del arg1_1 del buf2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 return reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0), class SpatialGatherModuleNew(nn.Module): """Aggregate the context features according to the initial predicted probability distribution. Employ the soft-weighted method to aggregate the context. """ def __init__(self, scale): super(SpatialGatherModuleNew, self).__init__() self.scale = scale def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
HusterRC/mmsegmentation
SpatialGatherModule
false
5,310
[ "Apache-2.0" ]
1
c3e4dbc2e06de3f47f75098f76772b4ee7e91e35
https://github.com/HusterRC/mmsegmentation/tree/c3e4dbc2e06de3f47f75098f76772b4ee7e91e35
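A sketch checking the fused softmax plus batched matmul from the record above against the same computation written out in eager PyTorch. It assumes SpatialGatherModuleNew and get_inputs are in scope and a CUDA device is available; the roles of the two arguments below are read off the generated call() (the first input is the one that gets softmaxed), so treat that mapping as an assumption.

import torch
import torch.nn.functional as F

a, b = [t.cuda() for t in get_inputs()]
out = SpatialGatherModuleNew(scale=1.0)(a, b)
probs = F.softmax(a.view(4, 4, -1), dim=2)    # softmaxed by triton_per_fused__softmax_0
feats = b.view(4, 4, -1).permute(0, 2, 1)     # reinterpreted as the bmm operand
expected = torch.matmul(probs, feats).permute(0, 2, 1).unsqueeze(3)
assert out.shape == (4, 4, 4, 1)
assert torch.allclose(out, expected, atol=1e-6)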
ConveRTOuterFeedForward
import torch import torch.nn as nn import torch.nn.functional as fnn from torch.nn.modules.normalization import LayerNorm class ConveRTOuterFeedForward(nn.Module): """Fully-Connected 3-layer Linear Model""" def __init__(self, input_hidden: 'int', intermediate_hidden: 'int', dropout_rate: 'float'=0.0): """ :param input_hidden: first-hidden layer input embed-dim :type input_hidden: int :param intermediate_hidden: hidden width of the intermediate layers :type intermediate_hidden: int :param dropout_rate: dropout rate, defaults to 0.0 :type dropout_rate: float, optional """ super().__init__() self.linear_1 = nn.Linear(input_hidden, intermediate_hidden) self.dropout = nn.Dropout(dropout_rate) self.linear_2 = nn.Linear(intermediate_hidden, intermediate_hidden) self.linear_3 = nn.Linear(intermediate_hidden, intermediate_hidden) self.norm = LayerNorm(intermediate_hidden) def forward(self, x: 'torch.Tensor') ->torch.Tensor: """forward pass through the fully-connected 3-layer stack :param x: fnn input :type x: torch.Tensor :return: fnn output :rtype: torch.Tensor """ x = self.linear_1(x) x = self.linear_2(self.dropout(x)) x = self.linear_3(self.dropout(x)) return fnn.gelu(self.norm(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_hidden': 4, 'intermediate_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.modules.normalization import LayerNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_gelu_native_layer_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tmp11 = 0.7071067811865476 tmp12 = tmp8 * tmp11 tmp13 = libdevice.erf(tmp12) tmp14 = 1.0 tmp15 = tmp13 + tmp14 tmp16 = tmp10 * tmp15 tl.store(in_out_ptr0 + x2, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 
1), torch.float32) extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf5 del buf5 triton_poi_fused_gelu_native_layer_norm_1[grid(256)](buf6, buf2, buf3, buf4, primals_8, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del buf4 return buf6, primals_8, primals_9, reinterpret_tensor(primals_3, (64, 4 ), (4, 1), 0), buf0, buf1, buf2, primals_6, primals_4 class ConveRTOuterFeedForwardNew(nn.Module): """Fully-Connected 3-layer Linear Model""" def __init__(self, input_hidden: 'int', intermediate_hidden: 'int', dropout_rate: 'float'=0.0): """ :param input_hidden: first-hidden layer input embed-dim :type input_hidden: int :param intermediate_hidden: hidden width of the intermediate layers :type intermediate_hidden: int :param dropout_rate: dropout rate, defaults to 0.0 :type dropout_rate: float, optional """ super().__init__() self.linear_1 = nn.Linear(input_hidden, intermediate_hidden) self.dropout = nn.Dropout(dropout_rate) self.linear_2 = nn.Linear(intermediate_hidden, intermediate_hidden) self.linear_3 = nn.Linear(intermediate_hidden, intermediate_hidden) self.norm = LayerNorm(intermediate_hidden) def forward(self, input_0): primals_1 = self.linear_1.weight primals_2 = self.linear_1.bias primals_4 = self.linear_2.weight primals_5 = self.linear_2.bias primals_6 = self.linear_3.weight primals_7 = self.linear_3.bias primals_8 = self.norm.weight primals_9 = self.norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
luweishuang/ConveRT-pytorch
ConveRTOuterFeedForward
false
10,569
[ "Apache-2.0" ]
0
e14aaf2287eb3a78ee7d83ea02d9bd322863227f
https://github.com/luweishuang/ConveRT-pytorch/tree/e14aaf2287eb3a78ee7d83ea02d9bd322863227f
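A sketch comparing the eager and optimised feed-forward stacks from the record above; it assumes both classes are in scope and a CUDA device is available. With dropout_rate=0.0 the forward pass is deterministic, so after copying the weights the two outputs should agree to numerical tolerance.

import torch

torch.manual_seed(0)
ref = ConveRTOuterFeedForward(input_hidden=4, intermediate_hidden=4).cuda()
opt = ConveRTOuterFeedForwardNew(input_hidden=4, intermediate_hidden=4).cuda()
opt.load_state_dict(ref.state_dict())  # parameter names match (linear_1..3, norm)
x = torch.rand(4, 4, 4, 4, device="cuda")
assert torch.allclose(ref(x), opt(x), atol=1e-5)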
Pool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ws/cwsernjijxsliy5iskqdqkl4a4mqrxpxpwdopiyuuvrgc74ugctd.py # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten._adaptive_avg_pool2d] # Source node to ATen node mapping: # adaptive_avg_pool2d => _adaptive_avg_pool2d # Graph fragment: # %_adaptive_avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [5, 5]), kwargs = {}) triton_poi_fused__adaptive_avg_pool2d_0 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 5) % 5 x0 = xindex % 5 x2 = (xindex // 25) x4 = xindex tmp0 = ((4*x1) // 5) tmp1 
= ((8 + (4*x1)) // 5) tmp2 = tmp0 < tmp1 tmp3 = ((4*x0) // 5) tmp4 = ((8 + (4*x0)) // 5) tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + ((4*((4*x1) // 5)) + (16*x2) + ((4*x0) // 5)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = 1 + ((4*x0) // 5) tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + (4*((4*x1) // 5)) + (16*x2) + ((4*x0) // 5)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp7 tmp13 = 1 + ((4*x1) // 5) tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + (4*((4*x1) // 5)) + (16*x2) + ((4*x0) // 5)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp12 tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + (4*((4*x1) // 5)) + (16*x2) + ((4*x0) // 5)), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 + tmp17 tmp21 = 1.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp6, tmp21, tmp22) tmp24 = tl.where(tmp10, tmp21, tmp22) tmp25 = tmp24 + tmp23 tmp26 = tl.where(tmp15, tmp21, tmp22) tmp27 = tmp26 + tmp25 tmp28 = tl.where(tmp18, tmp21, tmp22) tmp29 = tmp28 + tmp27 tmp30 = tmp20 / tmp29 tl.store(out_ptr0 + (x4), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten._adaptive_avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused__adaptive_avg_pool2d_0.run(arg0_1, buf0, 400, grid=grid(400), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 5 x0 = xindex % 5 x2 = xindex // 25 x4 = xindex tmp0 = 4 * x1 // 5 tmp1 = (8 + 4 * x1) // 5 tmp2 = tmp0 < tmp1 tmp3 = 4 * x0 // 5 tmp4 = (8 + 4 * x0) // 5 tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 5), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = 1 + 4 * x0 // 5 tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + 4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 5), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp7 tmp13 = 1 + 4 * x1 // 5 tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + 4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 5), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp12 tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + 4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 5), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 + tmp17 tmp21 = 1.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp6, tmp21, tmp22) tmp24 = tl.where(tmp10, tmp21, tmp22) tmp25 = tmp24 + tmp23 tmp26 = tl.where(tmp15, tmp21, tmp22) tmp27 = tmp26 + tmp25 tmp28 = tl.where(tmp18, tmp21, tmp22) tmp29 = tmp28 + tmp27 tmp30 = tmp20 / tmp29 tl.store(out_ptr0 + x4, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused__adaptive_avg_pool2d_0[grid(400)](arg0_1, buf0, 400, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PoolNew(nn.Module): def __init__(self): super(PoolNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
borisfom/TRTorch
Pool
false
9,792
[ "BSD-3-Clause" ]
0
1660633c6f6a480cd123d9d91cabf4eced12e8f3
https://github.com/borisfom/TRTorch/tree/1660633c6f6a480cd123d9d91cabf4eced12e8f3
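A sketch for the Pool record above; it assumes PoolNew is in scope and a CUDA device is available. The fused kernel implements F.adaptive_avg_pool2d(x, (5, 5)) for a (4, 4, 4, 4) input, including the per-bin element counts for the overlapping windows, which the check below mirrors.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device="cuda")
out = PoolNew()(x)
assert out.shape == (4, 4, 5, 5)
assert torch.allclose(out, F.adaptive_avg_pool2d(x, (5, 5)), atol=1e-6)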
MultiHeadAttention
import math import torch import torch.nn as nn def scaled_dot_product_attention(query, keys, values, mask=None): d_k = keys.shape[-1] dot_score = query @ keys.transpose(-2, -1) / math.sqrt(d_k) if mask is not None: dot_score = dot_score.masked_fill(mask == 0, -1000000000.0) attn_score = torch.softmax(dot_score, dim=-1) return attn_score @ values, attn_score class MultiHeadAttention(nn.Module): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.depth = d_model // num_heads self.d_model = self.num_heads * self.depth self.wq = nn.Linear(d_model, d_model) self.wk = nn.Linear(d_model, d_model) self.wv = nn.Linear(d_model, d_model) self.wo = nn.Linear(d_model, d_model) def reshape_for_multi_heads_attention(self, t): batch_size = t.shape[0] t = t.view(batch_size, -1, self.num_heads, self.depth) return t.transpose(1, 2) def forward(self, q, k, v, mask): batch_size = q.shape[0] q = self.wq(q) k = self.wk(k) v = self.wv(v) q = self.reshape_for_multi_heads_attention(q) k = self.reshape_for_multi_heads_attention(k) v = self.reshape_for_multi_heads_attention(v) scaled_attention, _attention_weights = scaled_dot_product_attention(q, k, v, mask) scaled_attention = scaled_attention.transpose(2, 1).contiguous().view( batch_size, -1, self.d_model) return self.wo(scaled_attention) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 16, 16])] def get_init_inputs(): return [[], {'d_model': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused__softmax_div_eq_masked_fill_1(in_ptr0, in_ptr1, out_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp2, tmp6, tmp5) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, float('-inf')) tmp11 = triton_helpers.max2(tmp10, 1)[:, None] tmp12 = tmp7 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tmp13 / tmp17 tl.store(out_ptr0 + (r1 + 16 * x0), tmp2, xmask) tl.store(out_ptr3 + (r1 + 16 * x0), tmp18, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .bool) buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_per_fused__softmax_div_eq_masked_fill_1[grid(256)](primals_10, buf5, buf6, buf9, 256, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf5 del primals_10 buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf10, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_8 buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0) del buf11 
extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_12 return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf6, buf9, reinterpret_tensor(buf12, (64, 4), (4, 1), 0 ), primals_11, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0) def scaled_dot_product_attention(query, keys, values, mask=None): d_k = keys.shape[-1] dot_score = query @ keys.transpose(-2, -1) / math.sqrt(d_k) if mask is not None: dot_score = dot_score.masked_fill(mask == 0, -1000000000.0) attn_score = torch.softmax(dot_score, dim=-1) return attn_score @ values, attn_score class MultiHeadAttentionNew(nn.Module): def __init__(self, d_model, num_heads): super(MultiHeadAttentionNew, self).__init__() self.num_heads = num_heads self.depth = d_model // num_heads self.d_model = self.num_heads * self.depth self.wq = nn.Linear(d_model, d_model) self.wk = nn.Linear(d_model, d_model) self.wv = nn.Linear(d_model, d_model) self.wo = nn.Linear(d_model, d_model) def reshape_for_multi_heads_attention(self, t): batch_size = t.shape[0] t = t.view(batch_size, -1, self.num_heads, self.depth) return t.transpose(1, 2) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.wq.weight primals_3 = self.wq.bias primals_4 = self.wk.weight primals_5 = self.wk.bias primals_7 = self.wv.weight primals_8 = self.wv.bias primals_11 = self.wo.weight primals_12 = self.wo.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
NathanYanJing/TransformerReplication
MultiHeadAttention
false
11,748
[ "MIT" ]
0
b20f987dcc507724971f843c2d214c9c76bd8e34
https://github.com/NathanYanJing/TransformerReplication/tree/b20f987dcc507724971f843c2d214c9c76bd8e34
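A sketch comparing the eager and optimised attention from the record above; it assumes both classes and get_inputs are in scope and a CUDA device is available. The primals mapping in the optimised forward ties each projection weight to its eager counterpart, so copying the state dict makes the two directly comparable.

import torch

torch.manual_seed(0)
ref = MultiHeadAttention(d_model=4, num_heads=4).cuda()
opt = MultiHeadAttentionNew(d_model=4, num_heads=4).cuda()
opt.load_state_dict(ref.state_dict())
q, k, v, mask = [t.cuda() for t in get_inputs()]
# Both return (batch, seq_len, d_model) = (4, 16, 4) for these inputs.
assert torch.allclose(ref(q, k, v, mask), opt(q, k, v, mask), atol=1e-5)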
GradientReversal
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/u5/cu56dhpcth43gy4shrd7mcexf4nfa6qetnnhwe4mno4v6ug76h6j.py # Topologically Sorted Source Nodes: [clone], Original ATen: [aten.clone] # Source node to ATen node mapping: # clone => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 
16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [clone], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class GradientReversalFunction(torch.autograd.Function): """ Gradient Reversal Layer from: Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015) Forward pass is the identity function. In the backward pass, the upstream gradients are multiplied by -lambda (i.e. gradient is reversed) """ @staticmethod def forward(ctx, x, lambda_): ctx.lambda_ = lambda_ return x.clone() @staticmethod def backward(ctx, grads): lambda_ = ctx.lambda_ lambda_ = grads.new_tensor(lambda_) dx = -lambda_ * grads return dx, None class GradientReversalNew(torch.nn.Module): """ Gradient Reversal Layer Code from: https://github.com/jvanvugt/pytorch-domain-adaptation/blob/master/utils.py """ def __init__(self, lambda_=1): super(GradientReversalNew, self).__init__() self.lambda_ = lambda_ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ishine/CDFSE_FastSpeech2
GradientReversal
false
12,539
[ "MIT" ]
0
f0facd077fa3e11b2704f2e8a1d1315bd1f4f493
https://github.com/ishine/CDFSE_FastSpeech2/tree/f0facd077fa3e11b2704f2e8a1d1315bd1f4f493
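A sketch for the record above; it assumes GradientReversalFunction from the optimised code is in scope. The forward pass is the identity, so the interesting behaviour is the sign flip in the backward pass.

import torch

x = torch.rand(4, 4, requires_grad=True)
y = GradientReversalFunction.apply(x, 1.0)
y.sum().backward()
assert torch.equal(y, x)          # forward is x.clone()
assert torch.all(x.grad == -1.0)  # upstream ones scaled by -lambda_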
ConcatPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/dy/cdydayzef2fxcnqukm4em4cj2latyjpphw5luwkneuqlp4m5b7zg.py # Topologically Sorted Source Nodes: [max_pool2d, avg_pool2d], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # max_pool2d => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [4, 4], [4, 4], [0, 0], [1, 1], False), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4], [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit 
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (16*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x2)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x2)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x2)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x2)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x2)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp31 = tmp1 + tmp0 tmp32 = tmp3 + tmp31 tmp33 = tmp5 + tmp32 tmp34 = tmp7 + tmp33 tmp35 = tmp9 + tmp34 tmp36 = tmp11 + tmp35 tmp37 = tmp13 + tmp36 tmp38 = tmp15 + tmp37 tmp39 = tmp17 + tmp38 tmp40 = tmp19 + tmp39 tmp41 = tmp21 + tmp40 tmp42 = tmp23 + tmp41 tmp43 = tmp25 + tmp42 tmp44 = tmp27 + tmp43 tmp45 = tmp29 + tmp44 tmp46 = 0.0625 tmp47 = tmp45 * tmp46 tl.store(out_ptr0 + (x0 + (8*x1)), tmp30, xmask) tl.store(out_ptr1 + (x0 + (8*x1)), tmp47, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32) buf0 = reinterpret_tensor(buf2, (4, 4, 1, 1), (8, 1, 1, 1), 0) # alias buf1 = reinterpret_tensor(buf2, (4, 4, 1, 1), (8, 1, 1, 1), 4) # alias # Topologically Sorted Source Nodes: [max_pool2d, avg_pool2d], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0.run(arg0_1, buf0, buf1, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = 
rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp31 = tmp1 + tmp0 tmp32 = tmp3 + tmp31 tmp33 = tmp5 + tmp32 tmp34 = tmp7 + tmp33 tmp35 = tmp9 + tmp34 tmp36 = tmp11 + tmp35 tmp37 = tmp13 + tmp36 tmp38 = tmp15 + tmp37 tmp39 = tmp17 + tmp38 tmp40 = tmp19 + tmp39 tmp41 = tmp21 + tmp40 tmp42 = tmp23 + tmp41 tmp43 = tmp25 + tmp42 tmp44 = tmp27 + tmp43 tmp45 = tmp29 + tmp44 tmp46 = 0.0625 tmp47 = tmp45 * tmp46 tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask) tl.store(out_ptr1 + (x0 + 8 * x1), tmp47, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32) buf0 = reinterpret_tensor(buf2, (4, 4, 1, 1), (8, 1, 1, 1), 0) buf1 = reinterpret_tensor(buf2, (4, 4, 1, 1), (8, 1, 1, 1), 4) get_raw_stream(0) 
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf2, class ConcatPool2dNew(nn.Module): """Layer that concats `AvgPool2d` and `MaxPool2d`""" def __init__(self, ks, stride=None, padding=0): super().__init__() self.ap = nn.AvgPool2d(ks, stride, padding) self.mp = nn.MaxPool2d(ks, stride, padding) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
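A minimal usage sketch for ConcatPool2dNew above (not part of the original dataset row; it assumes a CUDA device, since the generated call() pins device 0, and the exact (4, 4, 4, 4) float32 input the kernel was specialized for — ks=4 is my reading of the 16-element window unrolled in the kernel):

import torch

# Hedged sketch: compare the fused kernel against the eager pooling layers
# it was compiled from. ks=4 matches the hard-coded 4x4 reduction above.
x = torch.rand(4, 4, 4, 4, device='cuda')
m = ConcatPool2dNew(ks=4)
out = m(x)  # (4, 8, 1, 1): channels 0-3 hold the max pool, 4-7 the average pool
ref = torch.cat([m.mp(x), m.ap(x)], dim=1)
assert out.shape == (4, 8, 1, 1)
assert torch.allclose(out, ref, atol=1e-6)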
davidleonfdez/face2anime
ConcatPool2d
false
1,801
[ "MIT" ]
0
896bf85a7aa28322cc9e9e586685db8cbbf39d89
https://github.com/davidleonfdez/face2anime/tree/896bf85a7aa28322cc9e9e586685db8cbbf39d89
LRN
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/gv/cgvon7iygyhjm2kmwtta5t2r3z2byfrr4qwpcmym3h4h6yzxvtvp.py # Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div] # Source node to ATen node mapping: # add => add # div => pow_1 # div_1 => avg_pool2d # div_2 => pow_2 # mul => mul # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [1, 1], [1, 1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 0.0001), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 2.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {}) triton_poi_fused_add_avg_pool2d_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_div_mul_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0001 tmp5 = tmp3 * tmp4 tmp6 = 2.0 tmp7 = tmp5 + tmp6 tmp8 = 0.75 tmp9 = libdevice.pow(tmp7, tmp8) tmp10 = tmp0 / tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_avg_pool2d_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0001 tmp5 = tmp3 * tmp4 tmp6 = 2.0 tmp7 = tmp5 + tmp6 tmp8 = 0.75 tmp9 = libdevice.pow(tmp7, tmp8) tmp10 = tmp0 / tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_avg_pool2d_div_mul_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class LRNNew(nn.Module): def __init__(self, local_size=1, alpha=0.0001, beta=0.75, ACROSS_CHANNELS=False): super(LRNNew, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if self.ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
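To make the fused arithmetic legible, here is a hedged eager-mode equivalent (my reading of the constants baked into the kernel above: local_size=1 turns the average pool into an identity, alpha=1e-4 appears as tmp4, beta=0.75 as tmp8, and tmp6=2.0 is the additive offset):

import torch

# Hedged sketch: eager re-derivation of the fused kernel's formula,
#   out = x / (avg_pool(x^2) * alpha + 2.0) ** beta
# where the 1x1 average pool reduces to the identity. Requires CUDA,
# since the generated call() allocates CUDA buffers.
x = torch.rand(4, 4, 4, 4, device='cuda')
m = LRNNew()
out = m(x)
ref = x / (x.pow(2) * 0.0001 + 2.0).pow(0.75)
assert torch.allclose(out, ref, atol=1e-6)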
PengJingchao/DFNet
LRN
false
937
[ "MIT" ]
0
49e83501f81515aebca211351e315896da7afc54
https://github.com/PengJingchao/DFNet/tree/49e83501f81515aebca211351e315896da7afc54
Net
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1_1 = nn.Conv2d(1, 32, kernel_size=5, padding=2) self.prelu1_1 = nn.PReLU() self.conv1_2 = nn.Conv2d(32, 32, kernel_size=5, padding=2) self.prelu1_2 = nn.PReLU() self.conv2_1 = nn.Conv2d(32, 64, kernel_size=5, padding=2) self.prelu2_1 = nn.PReLU() self.conv2_2 = nn.Conv2d(64, 64, kernel_size=5, padding=2) self.prelu2_2 = nn.PReLU() self.conv3_1 = nn.Conv2d(64, 128, kernel_size=5, padding=2) self.prelu3_1 = nn.PReLU() self.conv3_2 = nn.Conv2d(128, 128, kernel_size=5, padding=2) self.prelu3_2 = nn.PReLU() self.preluip1 = nn.PReLU() self.ip1 = nn.Linear(128 * 3 * 3, 2) self.ip2 = nn.Linear(2, 10, bias=False) def forward(self, x): x = self.prelu1_1(self.conv1_1(x)) x = self.prelu1_2(self.conv1_2(x)) x = F.max_pool2d(x, 2) x = self.prelu2_1(self.conv2_1(x)) x = self.prelu2_2(self.conv2_2(x)) x = F.max_pool2d(x, 2) x = self.prelu3_1(self.conv3_1(x)) x = self.prelu3_2(self.conv3_2(x)) x = F.max_pool2d(x, 2) x = x.view(-1, 128 * 3 * 3) ip1 = self.preluip1(self.ip1(x)) ip2 = self.ip2(ip1) return ip1, F.log_softmax(ip2, dim=1) def get_inputs(): return [torch.rand([4, 1, 24, 24])] def get_init_inputs(): return [[], {}]
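For orientation, a quick eager-mode shape check of Net (a sketch added here, not part of the row; the sizes follow from the 24x24 input in get_inputs and the three 2x2 max-pools):

import torch

net = Net()
x = torch.rand(4, 1, 24, 24)        # matches get_inputs(); 24 -> 12 -> 6 -> 3 after the pools
ip1, log_probs = net(x)
assert ip1.shape == (4, 2)          # 2-D embedding returned alongside the class scores
assert log_probs.shape == (4, 10)   # log-softmax over the 10 digit classes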
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 576 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 576 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + 32 * x2 + 18432 * y1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__prelu_kernel_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 % 12 x2 = xindex // 384 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 1536 * x2), None) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 1536 * x2), None) tmp3 = tl.load(in_ptr0 + (768 + x0 + 64 * x1 + 1536 * x2), None) tmp5 = tl.load(in_ptr0 + (800 + x0 + 64 * x1 + 1536 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = 
xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 6 x2 = xindex // 384 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 1536 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 1536 * x2), xmask) tmp3 = tl.load(in_ptr0 + (768 + x0 + 128 * x1 + 1536 * x2), xmask) tmp5 = tl.load(in_ptr0 + (832 + x0 + 128 * x1 + 1536 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_11(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 36 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 3 y1 = yindex // 3 y5 = yindex y4 = yindex // 9 y6 = yindex % 9 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 1536 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 1536 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (768 + x2 + 256 * y0 + 1536 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (896 + x2 + 256 * y0 + 1536 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 9 * x2 + 1152 * y4), tmp16, xmask & ymask) @triton.jit def 
triton_poi_fused__prelu_kernel_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_per_fused__log_softmax_14(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 1, 24, 24), (576, 576, 24, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (32, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_6, (32,), (1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (64, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (64, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_12, (64,), (1,)) assert_size_stride(primals_13, (1,), (1,)) assert_size_stride(primals_14, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (1,), (1,)) assert_size_stride(primals_17, (128, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_18, (128,), (1,)) assert_size_stride(primals_19, (1,), (1,)) assert_size_stride(primals_20, (2, 1152), (1152, 1)) assert_size_stride(primals_21, (2,), (1,)) assert_size_stride(primals_22, (1,), (1,)) assert_size_stride(primals_23, (10, 2), (2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 32, 5, 5), (800, 1, 160, 32), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(1024, 25)](primals_5, buf0, 1024, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_5 buf1 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch. 
float32) triton_poi_fused_1[grid(2048, 25)](primals_8, buf1, 2048, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_8 buf2 = empty_strided_cuda((64, 64, 5, 5), (1600, 1, 320, 64), torch .float32) triton_poi_fused_2[grid(4096, 25)](primals_11, buf2, 4096, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_11 buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_3[grid(8192, 25)](primals_14, buf3, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_14 buf4 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128), torch.float32) triton_poi_fused_4[grid(16384, 25)](primals_17, buf4, 16384, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_17 buf5 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 32, 24, 24), (18432, 576, 24, 1)) buf6 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32), torch.float32) triton_poi_fused_convolution_5[grid(128, 576)](buf5, primals_2, buf6, 128, 576, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf7 = reinterpret_tensor(buf5, (4, 32, 24, 24), (18432, 1, 768, 32), 0 ) del buf5 triton_poi_fused__prelu_kernel_6[grid(73728)](buf6, primals_4, buf7, 73728, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, buf0, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 24, 24), (18432, 1, 768, 32)) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32), torch.float32) triton_poi_fused__prelu_kernel_convolution_7[grid(73728)](buf9, primals_6, primals_7, buf10, 73728, XBLOCK=1024, num_warps=4, num_stages=1) del primals_6 buf11 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32), torch.float32) buf12 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(18432)](buf10, buf11, buf12, 18432, XBLOCK=256, num_warps=4, num_stages=1) buf13 = extern_kernels.convolution(buf11, buf1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 64, 12, 12), (9216, 1, 768, 64)) buf14 = buf13 del buf13 buf15 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64), torch.float32) triton_poi_fused__prelu_kernel_convolution_9[grid(36864)](buf14, primals_9, primals_10, buf15, 36864, XBLOCK=512, num_warps=4, num_stages=1) del primals_9 buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 12, 12), (9216, 1, 768, 64)) buf17 = buf16 del buf16 buf18 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64), torch.float32) triton_poi_fused__prelu_kernel_convolution_9[grid(36864)](buf17, primals_12, primals_13, buf18, 36864, XBLOCK=512, num_warps=4, num_stages=1) del primals_12 buf19 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch .float32) buf20 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch .int8) triton_poi_fused_max_pool2d_with_indices_10[grid(9216)](buf18, buf19, buf20, 9216, XBLOCK=256, num_warps=4, num_stages=1) buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf21, (4, 128, 6, 6), (4608, 1, 768, 128)) buf22 = buf21 del buf21 buf23 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32) triton_poi_fused__prelu_kernel_convolution_11[grid(18432)](buf22, primals_15, primals_16, buf23, 18432, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(buf23, buf4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 6, 6), (4608, 1, 768, 128)) buf25 = buf24 del buf24 buf26 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32) triton_poi_fused__prelu_kernel_convolution_11[grid(18432)](buf25, primals_18, primals_19, buf26, 18432, XBLOCK=128, num_warps=4, num_stages=1) del primals_18 buf27 = empty_strided_cuda((4, 128, 3, 3), (1152, 1, 384, 128), torch.int8) buf28 = empty_strided_cuda((4, 128, 3, 3), (1152, 9, 3, 1), torch. float32) triton_poi_fused_max_pool2d_with_indices_12[grid(36, 128)](buf26, buf27, buf28, 36, 128, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) buf29 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_21, reinterpret_tensor(buf28, (4, 1152 ), (1152, 1), 0), reinterpret_tensor(primals_20, (1152, 2), (1, 1152), 0), alpha=1, beta=1, out=buf29) del primals_21 buf30 = empty_strided_cuda((4, 2), (2, 1), torch.float32) triton_poi_fused__prelu_kernel_13[grid(8)](buf29, primals_22, buf30, 8, XBLOCK=8, num_warps=1, num_stages=1) buf31 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.mm(buf30, reinterpret_tensor(primals_23, (2, 10), (1, 2), 0), out=buf31) buf34 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_14[grid(4)](buf31, buf34, 4, 10, XBLOCK=1, num_warps=2, num_stages=1) del buf31 return (buf30, buf34, primals_1, primals_3, primals_4, buf0, primals_7, buf1, primals_10, buf2, primals_13, buf3, primals_16, buf4, primals_19, primals_22, buf6, buf7, buf9, buf10, buf11, buf12, buf14, buf15, buf17, buf18, buf19, buf20, buf22, buf23, buf25, buf26, buf27, reinterpret_tensor(buf28, (4, 1152), (1152, 1), 0), buf29, buf30, buf34, primals_23, primals_20) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1_1 = nn.Conv2d(1, 32, kernel_size=5, padding=2) self.prelu1_1 = nn.PReLU() self.conv1_2 = nn.Conv2d(32, 32, kernel_size=5, padding=2) self.prelu1_2 = nn.PReLU() self.conv2_1 = nn.Conv2d(32, 64, kernel_size=5, padding=2) self.prelu2_1 = nn.PReLU() self.conv2_2 = nn.Conv2d(64, 64, kernel_size=5, padding=2) self.prelu2_2 = nn.PReLU() self.conv3_1 = nn.Conv2d(64, 128, kernel_size=5, padding=2) self.prelu3_1 = nn.PReLU() self.conv3_2 = nn.Conv2d(128, 128, kernel_size=5, padding=2) self.prelu3_2 = nn.PReLU() self.preluip1 = nn.PReLU() self.ip1 = nn.Linear(128 * 3 * 3, 2) self.ip2 = nn.Linear(2, 10, bias=False) def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.prelu1_1.weight primals_5 = self.conv1_2.weight primals_6 = self.conv1_2.bias primals_7 = self.prelu1_2.weight primals_8 = self.conv2_1.weight primals_9 = self.conv2_1.bias primals_10 = self.prelu2_1.weight primals_11 = self.conv2_2.weight primals_12 = self.conv2_2.bias primals_13 = self.prelu2_2.weight primals_14 = self.conv3_1.weight primals_15 = self.conv3_1.bias primals_16 = self.prelu3_1.weight primals_17 = self.conv3_2.weight primals_18 = self.conv3_2.bias primals_19 = self.prelu3_2.weight primals_22 = self.preluip1.weight primals_20 = 
self.ip1.weight primals_21 = self.ip1.bias primals_23 = self.ip2.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0], output[1]
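A hedged equivalence sketch for NetNew (an illustration, not part of the row: it assumes a CUDA device, which the generated call() requires, and that both Net and NetNew are importable together; parameter names line up by construction, so a state_dict transfer is direct):

import torch

# Hedged sketch: the compiled NetNew should reproduce the eager Net given
# identical weights. Tolerances are loose because kernel fusion reorders
# floating-point arithmetic.
eager = Net().cuda().eval()
fused = NetNew().cuda().eval()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 1, 24, 24, device='cuda')
with torch.no_grad():
    ip1_e, logp_e = eager(x)
ip1_f, logp_f = fused(x)
assert torch.allclose(ip1_e, ip1_f, atol=1e-4)
assert torch.allclose(logp_e, logp_f, atol=1e-4)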
jxgu1016/MNIST_with_centerloss.pytorch
Net
false
15,812
[ "MIT" ]
346
4e94cc77fe94056a7f1f081fcaf0325781ba0224
https://github.com/jxgu1016/MNIST_with_centerloss.pytorch/tree/4e94cc77fe94056a7f1f081fcaf0325781ba0224
LayerScaleBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_4, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_4, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_4, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/vs/cvs6icdxviah4kvxj7x53zy2cxi5vohxx6kljkphckzrvc3cntr4.py # Topologically Sorted Source Nodes: [q, attn], Original ATen: [aten.mul, aten.clone] # Source node to ATen node mapping: # attn => clone # q => mul_2 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 1.0), kwargs = {}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_mul_2 = async_compile.triton('triton_poi_fused_clone_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_mul_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/aw/cawvwx3nv7ipnpnf2hcgwz5usu7vsw5yynj5ofrunhktjwqff5vq.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone] # Source node to ATen node mapping: # attn => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/5j/c5jbbs6rjuscr2km33ndvlmtkgcup7curz3fm3tk7stvjquhtikm.py # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # linear_1 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qh/cqhjawj74jhki6ttujjfuup7tdnvc4atfzmw7o2uoasyzrs5f2ht.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => amax, clone_3, exp, sub_1, sum_1 # Graph fragment: # %clone_3 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone_3, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tl.store(out_ptr0 + (x2), tmp11, xmask) tl.store(out_ptr1 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2n/c2no6slg3kdm2omssc7xgqdy7bp47rufttfgcv7gbbfogwfv7ngf.py # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # linear_2 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = 
xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/go/cgojyax3mzsggeiylukgnpnwa2eaupb5run3yfarghtgr6k2gbks.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_6 # Graph fragment: # %clone_6 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/z5/cz5vsepa6xzt2kxyz2o5pev7avr6mhxpo7iklp72hpog6wgaos5u.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_7 # Graph fragment: # %clone_7 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_8 = async_compile.triton('triton_poi_fused_clone_8', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/he/chelvagj4d5lscmfzmvqqdjzm5txfx45x7j7qh4gwkcdx2i3wvui.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone_8 # Graph fragment: # %clone_8 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_13,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_9 = async_compile.triton('triton_poi_fused_clone_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/b5/cb56g3tvhbiqa54nbdvqrjnvglvsqxe3q6cr7qfdqipiteo3dpeo.py # Topologically Sorted Source Nodes: [x_1, mul_1, y], Original ATen: [aten.add, aten.mul] # Source node to ATen node mapping: # mul_1 => mul_3 # x_1 => add_4 # y => add_5 # Graph fragment: # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_15, %primals_11), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %add_4), kwargs = {}) # %add_5 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %mul_3), kwargs = {}) triton_poi_fused_add_mul_10 = async_compile.triton('triton_poi_fused_add_mul_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x2), xmask) tmp3 = tl.load(in_ptr3 + (x0), xmask, 
eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = tmp1 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/oq/coqzumelatufp5yu7vvdhpigthzr66jabgl2tfuy5kbvx35q2ezh.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_4 => add_8, erf, mul_6, mul_7, mul_8 # Graph fragment: # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_17, 0.5), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_17, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_7,), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %add_8), kwargs = {}) triton_poi_fused_gelu_11 = async_compile.triton('triton_poi_fused_gelu_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ft/cft2a6imncwdsd5fye4tmq2k4anwxyebo7ti5nsvvg43n7kjjw2z.py # Topologically Sorted Source Nodes: [mul_2, z], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul_2 => mul_9 # z => add_9 # Graph fragment: # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_12, %view_19), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_9), kwargs = {}) triton_poi_fused_add_mul_12 = async_compile.triton('triton_poi_fused_add_mul_12', ''' import triton import triton.language as tl 
from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x2), xmask) tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (12, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16, ), (1, )) assert_size_stride(primals_17, (4, 16), (16, 1)) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_4, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] 
triton_poi_fused_native_layer_norm_1.run(primals_4, buf0, buf1, primals_2, primals_3, buf2, 64, grid=grid(64), stream=stream0) del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [q, attn], Original ATen: [aten.mul, aten.clone] triton_poi_fused_clone_mul_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf6, buf7, 64, 4, grid=grid(64, 4), stream=stream0) buf8 = reinterpret_tensor(buf6, (64, 4), (4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf8, primals_7, buf9, buf10, 64, grid=grid(64), stream=stream0) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf8, primals_7, buf9, buf10, buf11, 256, grid=grid(256), stream=stream0) buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf12, primals_9, buf13, 16, 16, grid=grid(16, 16), stream=stream0) del primals_9 buf14 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] triton_poi_fused_clone_8.run(buf3, buf14, 16, 4, grid=grid(16, 4), stream=stream0) del buf3 buf15 = reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] triton_poi_fused_clone_9.run(buf15, buf16, 16, 4, grid=grid(16, 4), stream=stream0) buf17 = reinterpret_tensor(buf15, (16, 4), (4, 1), 0); 
del buf15 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, mul_1, y], Original ATen: [aten.add, aten.mul] triton_poi_fused_add_mul_10.run(primals_4, primals_1, buf17, primals_11, buf18, 64, grid=grid(64), stream=stream0) buf19 = buf1; del buf1 # reuse buf20 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_0.run(buf18, buf19, buf20, 16, grid=grid(16), stream=stream0) buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf18, buf19, buf20, primals_13, primals_14, buf21, 64, grid=grid(64), stream=stream0) del buf19 del buf20 del primals_14 buf22 = reinterpret_tensor(buf12, (16, 16), (16, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_16, reinterpret_tensor(buf21, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf22) del primals_16 buf23 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu] triton_poi_fused_gelu_11.run(buf22, buf23, 256, grid=grid(256), stream=stream0) buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_18, reinterpret_tensor(buf23, (16, 16), (16, 1), 0), reinterpret_tensor(primals_17, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf24) del primals_18 buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, z], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_12.run(buf18, primals_12, buf24, buf25, 64, grid=grid(64), stream=stream0) return (buf18, buf25, primals_1, primals_4, primals_7, primals_11, primals_12, primals_13, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf17, buf18, reinterpret_tensor(buf21, (16, 4), (4, 1), 0), buf22, reinterpret_tensor(buf23, (16, 16), (16, 1), 0), buf24, primals_17, primals_15, primals_10, reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf14, (16, 1, 4), (4, 1, 1), 0), primals_8, primals_6, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) 
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
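# --- Editor's note (illustrative sketch, not part of the source record) ---
# The file above is the raw standalone output emitted by torch.compile/inductor:
# saved under any name (e.g. layer_scale_block_raw.py -- the filename is
# hypothetical), it can be benchmarked on its own, since compiled_module_main
# wires benchmark_compiled_module() to the rand_strided dummy inputs defined
# above. A CUDA device matching the cc=80 (Ampere) metadata baked into the
# AttrsDescriptor headers is assumed.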
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_mul_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tl.store(out_ptr0 + x2, tmp11, xmask) tl.store(out_ptr1 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp6 = tl.load(in_ptr3 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x2, xmask) tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = tmp1 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_gelu_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x2, xmask) tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (12, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16,), (1,)) assert_size_stride(primals_17, (4, 16), (16, 1)) 
assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_4, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_4, buf0, buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_mul_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK =4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(64, 4)](buf6, buf7, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (64, 4), (4, 1), 0) del buf6 extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) triton_poi_fused__softmax_5[grid(64)](buf8, primals_7, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_6[grid(256)](buf8, primals_7, buf9, buf10, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 16)](buf12, primals_9, buf13, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_9 buf14 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf9 triton_poi_fused_clone_8[grid(16, 4)](buf3, buf14, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf15 = reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 1), 0) del buf10 extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_9[grid(16, 4)](buf15, buf16, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf17 = reinterpret_tensor(buf15, (16, 4), (4, 1), 0) del buf15 extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_10[grid(64)](primals_4, primals_1, buf17, primals_11, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = buf1 del buf1 buf20 = buf0 del buf0 
triton_poi_fused_native_layer_norm_0[grid(16)](buf18, buf19, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf18, buf19, buf20, primals_13, primals_14, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf19 del buf20 del primals_14 buf22 = reinterpret_tensor(buf12, (16, 16), (16, 1), 0) del buf12 extern_kernels.addmm(primals_16, reinterpret_tensor(buf21, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf22) del primals_16 buf23 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_gelu_11[grid(256)](buf22, buf23, 256, XBLOCK=256, num_warps=4, num_stages=1) buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_18, reinterpret_tensor(buf23, (16, 16), (16, 1), 0), reinterpret_tensor(primals_17, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf24) del primals_18 buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_12[grid(64)](buf18, primals_12, buf24, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) return (buf18, buf25, primals_1, primals_4, primals_7, primals_11, primals_12, primals_13, reinterpret_tensor(buf2, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor( buf16, (16, 4), (4, 1), 0), buf17, buf18, reinterpret_tensor(buf21, (16, 4), (4, 1), 0), buf22, reinterpret_tensor(buf23, (16, 16), (16, 1), 0), buf24, primals_17, primals_15, primals_10, reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf14, (16, 1, 4), (4, 1, 1), 0), primals_8, primals_6, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_5) def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class TalkingHeadAttn(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_l = nn.Linear(num_heads, num_heads) self.proj_w = nn.Linear(num_heads, num_heads) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = attn.softmax(dim=-1) attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScaleBlockNew(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=nn.LayerNorm, attn_block=TalkingHeadAttn, mlp_block=Mlp, init_values=0.0001): super().__init__() self.norm1 = norm_layer(dim) self.attn = attn_block(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = mlp_block(in_features=dim, hidden_features= mlp_hidden_dim, act_layer=act_layer, drop=drop) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True) def forward(self, input_0): primals_1 = self.gamma_1 primals_2 = self.gamma_2 primals_3 = self.norm1.weight primals_7 = self.norm1.bias primals_5 = self.attn.qkv.weight primals_6 = self.attn.proj.weight primals_9 = self.attn.proj.bias primals_8 = self.attn.proj_l.weight primals_11 = self.attn.proj_l.bias primals_10 = self.attn.proj_w.weight primals_12 = self.attn.proj_w.bias primals_13 = self.norm2.weight primals_14 = self.norm2.bias primals_15 = self.mlp.fc1.weight primals_16 = self.mlp.fc1.bias primals_17 = self.mlp.fc2.weight primals_18 = self.mlp.fc2.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0], output[1]
yifanc96/yifanc-DL
LayerScaleBlock
false
11,101
[ "MIT" ]
0
25d56cec776fb151c8f6bcbd997bca94f07f3597
https://github.com/yifanc96/yifanc-DL/tree/25d56cec776fb151c8f6bcbd997bca94f07f3597
LocalResponseNormLayer
import torch
import torch.nn as nn
import torch.nn.functional as F


class LocalResponseNormLayer(nn.Module):

    def forward(self, tensor, size=5, alpha=9.999999747378752e-05, beta=
        0.75, k=1.0):
        return F.local_response_norm(tensor, size=size, alpha=alpha, beta=
            beta, k=k)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
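# Editor's gloss (a sketch of the math, stated from PyTorch's definition of
# local response norm rather than from this repo): for input x and channel c,
#     out_c = x_c / (k + alpha * mean over a size-wide channel window of x_{c'}**2) ** beta
# with the window zero-padded at the channel edges. The fused kernel below
# realises exactly this for size=5: five guarded loads implement the padded
# window, the `* 0.2` is the 1/5 averaging step, and alpha, k and beta appear
# as the literals 9.999999747378752e-05, 1.0 and 0.75.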
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0(in_out_ptr0,
    in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4
    x3 = xindex
    tmp48 = tl.load(in_ptr0 + x3, xmask)
    tmp0 = -2 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (-32 + x3), tmp5 & xmask, other=0.0)
    tmp7 = tmp6 * tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp5, tmp7, tmp8)
    tmp10 = -1 + x1
    tmp11 = tmp10 >= tmp1
    tmp12 = tmp10 < tmp3
    tmp13 = tmp11 & tmp12
    tmp14 = tl.load(in_ptr0 + (-16 + x3), tmp13 & xmask, other=0.0)
    tmp15 = tmp14 * tmp14
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp13, tmp15, tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = x1
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tl.load(in_ptr0 + x3, tmp22 & xmask, other=0.0)
    tmp24 = tmp23 * tmp23
    tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
    tmp26 = tl.where(tmp22, tmp24, tmp25)
    tmp27 = tmp26 + tmp18
    tmp28 = 1 + x1
    tmp29 = tmp28 >= tmp1
    tmp30 = tmp28 < tmp3
    tmp31 = tmp29 & tmp30
    tmp32 = tl.load(in_ptr0 + (16 + x3), tmp31 & xmask, other=0.0)
    tmp33 = tmp32 * tmp32
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp31, tmp33, tmp34)
    tmp36 = tmp35 + tmp27
    tmp37 = 2 + x1
    tmp38 = tmp37 >= tmp1
    tmp39 = tmp37 < tmp3
    tmp40 = tmp38 & tmp39
    tmp41 = tl.load(in_ptr0 + (32 + x3), tmp40 & xmask, other=0.0)
    tmp42 = tmp41 * tmp41
    tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
    tmp44 = tl.where(tmp40, tmp42, tmp43)
    tmp45 = tmp44 + tmp36
    tmp46 = 0.2
    tmp47 = tmp45 * tmp46
    tmp49 = 9.999999747378752e-05
    tmp50 = tmp47 * tmp49
    tmp51 = 1.0
    tmp52 = tmp50 + tmp51
    tmp53 = 0.75
    tmp54 = libdevice.pow(tmp52, tmp53)
    tmp55 = tmp48 / tmp54
    tl.store(in_out_ptr0 + x3, tmp55, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1),
            torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0[grid(256)
            ](buf1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf1,


class LocalResponseNormLayerNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
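# Editor's sketch of a parity check (assumes a CUDA device; the tolerances are
# a guess, not taken from the source): the fused module should reproduce eager
# F.local_response_norm on the get_inputs() shape.
def _check_lrn_parity():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = torch.nn.functional.local_response_norm(x, size=5, alpha=
        9.999999747378752e-05, beta=0.75, k=1.0)
    out = LocalResponseNormLayerNew()(x)
    torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-5)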
a-kore/lucent
LocalResponseNormLayer
false
1,334
[ "Apache-2.0" ]
0
6b2b4dfea45c36c99e16f9923104a532df80e0a8
https://github.com/a-kore/lucent/tree/6b2b4dfea45c36c99e16f9923104a532df80e0a8
C51ValueNetwork
import torch
import numpy as np
import torch.nn as nn


class C51ValueNetwork(nn.Module):
    """Critic - return Q value from given states and actions.
    """

    def __init__(self, num_states, num_actions, hidden_size, v_min, v_max,
        num_atoms, device='cuda'):
        """
        Args:
            num_states (int): state dimension
            num_actions (int): action dimension
            hidden_size (int): size of the hidden layers
            v_min (float): minimum value for critic
            v_max (float): maximum value for critic
            num_atoms (int): number of atoms in distribution
            init_w:
        """
        super(C51ValueNetwork, self).__init__()
        self.linear1 = nn.Linear(num_states + num_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, num_atoms)
        self.z_atoms = np.linspace(v_min, v_max, num_atoms)

    def forward(self, state, action):
        x = torch.cat([state, action], 1)
        x = torch.relu(self.linear1(x))
        x = torch.relu(self.linear2(x))
        x = self.linear3(x)
        return x

    def get_probs(self, state, action):
        return torch.softmax(self.forward(state, action), dim=1)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_states': 4, 'num_actions': 4, 'hidden_size': 4,
        'v_min': 4, 'v_max': 4, 'num_atoms': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4 ), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_8 return buf5, buf0, buf2, buf4, primals_7, primals_5 class C51ValueNetworkNew(nn.Module): """Critic - return Q value from given states and actions. 
""" def __init__(self, num_states, num_actions, hidden_size, v_min, v_max, num_atoms, device='cuda'): """ Args: num_states (int): state dimension num_actions (int): action dimension hidden_size (int): size of the hidden layers v_min (float): minimum value for critic v_max (float): maximum value for critic num_atoms (int): number of atoms in distribution init_w: """ super(C51ValueNetworkNew, self).__init__() self.linear1 = nn.Linear(num_states + num_actions, hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.linear3 = nn.Linear(hidden_size, num_atoms) self.z_atoms = np.linspace(v_min, v_max, num_atoms) self def get_probs(self, state, action): return torch.softmax(self.forward(state, action), dim=1) def forward(self, input_0, input_1): primals_3 = self.linear1.weight primals_4 = self.linear1.bias primals_1 = self.linear2.weight primals_6 = self.linear2.bias primals_2 = self.linear3.weight primals_8 = self.linear3.bias primals_5 = input_0 primals_7 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
HzcIrving/DLRL_PlayGround
C51ValueNetwork
false
8,286
[ "MIT" ]
27
0db9a4bdb87130d1d26aea1591ef74cbe6aaa43b
https://github.com/HzcIrving/DLRL_PlayGround/tree/0db9a4bdb87130d1d26aea1591ef74cbe6aaa43b
BertSelfAttention
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn


class BertSelfAttention(nn.Module):

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                 % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.
            num_attention_heads)
        self.all_head_size = (self.num_attention_heads * self.
            attention_head_size)
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
            attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, query_states, key_states, value_states, attention_mask):
        """
        Args:
            query_states: (N, Lq, D)
            key_states: (N, L, D)
            value_states: (N, L, D)
            attention_mask: (N, Lq, L)

        Returns:

        """
        attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0
        mixed_query_layer = self.query(query_states)
        mixed_key_layer = self.key(key_states)
        mixed_value_layer = self.value(value_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(
            -1, -2))
        attention_scores = attention_scores / math.sqrt(self.
            attention_head_size)
        attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.
            all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
        4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
        4, attention_probs_dropout_prob=0.5)}]
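# Editor's sketch exercising the eager module with the shapes documented in
# the forward() docstring (config values follow get_init_inputs(); .eval() is
# used only to make the 0.5 dropout a no-op for the demo):
def _bert_self_attention_demo():
    cfg = _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5)
    attn = BertSelfAttention(cfg).eval()
    q = torch.rand(4, 4, 4)
    kv = torch.rand(4, 4, 4)
    # mask uses 1 = attend, 0 = block; forward turns it into an additive
    # -10000.0 term on the attention scores.
    mask = torch.ones(4, 4, 4)
    ctx = attn(q, kv, kv, mask)
    assert ctx.shape == (4, 4, 4)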
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = -10000.0 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp4 tmp11 = tmp7 + tmp10 tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp15 = tmp2 - tmp14 tmp16 = tmp15 * tmp4 tmp17 = tmp13 + tmp16 tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp21 = tmp2 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tmp19 + tmp22 tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tmp36 = float('-inf') tmp37 = tmp6 == tmp36 tmp38 = tmp37 == 0 tmp39 = tmp38.to(tl.int64) tmp40 = tmp39 != 0 tmp41 = tmp11 == tmp36 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tmp46 = tmp17 == tmp36 tmp47 = tmp46 == 0 tmp48 = tmp47.to(tl.int64) tmp49 = tmp48 != 0 tmp50 = tmp45 | tmp49 tmp51 = tmp23 == tmp36 tmp52 = tmp51 == 0 tmp53 = tmp52.to(tl.int64) tmp54 = tmp53 != 0 tmp55 = tmp50 | tmp54 tl.store(out_ptr0 + x3, tmp24, xmask) tl.store(out_ptr1 + x3, tmp35, xmask) tl.store(out_ptr2 + x3, tmp55, xmask) @triton.jit def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex x3 = xindex // 64 x6 = xindex % 16 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x5, xmask) tmp3 = tl.load(in_ptr1 + (x6 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = -10000.0 tmp7 = tmp5 * tmp6 tmp8 = tmp2 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp13 = tmp11 / tmp12 tmp14 = 0.0 tmp15 = tl.where(tmp1, tmp14, tmp13) tl.store(in_out_ptr0 + x5, tmp15, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2) del primals_8 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del 
buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_6, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_mul_rsub_1[grid(64)](buf5, primals_1, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_mul_rsub_2[grid(256)](buf9, buf8, primals_1, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_1 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf11 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_7, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttentionNew(nn.Module): def __init__(self, config): super(BertSelfAttentionNew, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.query.weight primals_3 = self.query.bias primals_5 = self.key.weight primals_6 = self.key.bias primals_8 = self.value.weight primals_9 = self.value.bias primals_1 = input_0 primals_4 = input_1 primals_7 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
minjoong507/Image-Captioning-Transformer
BertSelfAttention
false
7,247
[ "MIT" ]
1
813060f0bb656e336154173f11e99a80362c8c2a
https://github.com/minjoong507/Image-Captioning-Transformer/tree/813060f0bb656e336154173f11e99a80362c8c2a
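The forward docstring in the BertSelfAttention record gives the shapes but leaves the head bookkeeping implicit. Below is a minimal sketch of the same masked, scaled dot-product attention on the record's get_inputs() shapes; the query/key/value linear projections and dropout are omitted, so q, k, and v here stand for the already-projected tensors:

import math
import torch

N, L, D, H = 4, 4, 4, 4        # batch, length, hidden size, heads
d_head = D // H
q, k, v = (torch.rand(N, L, D) for _ in range(3))
mask = torch.ones(N, L, L)     # 1 = attend, 0 = ignore

def split_heads(x):
    # (N, L, D) -> (N, H, L, d_head), as in transpose_for_scores
    return x.view(N, L, H, d_head).permute(0, 2, 1, 3)

scores = split_heads(q) @ split_heads(k).transpose(-1, -2) / math.sqrt(d_head)
scores = scores + (1 - mask.unsqueeze(1)) * -10000.0  # large negative bias on masked slots
ctx = torch.softmax(scores, dim=-1) @ split_heads(v)
ctx = ctx.permute(0, 2, 1, 3).reshape(N, L, D)        # merge heads back
print(ctx.shape)               # torch.Size([4, 4, 4])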
GatedConvTranspose
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6k/c6kazydtzigopxuzedfthhmhnydldamntm2carnmlp5uv53z3g7p.py # Topologically Sorted Source Nodes: [f, conv_transpose2d_1, g, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # conv_transpose2d_1 => convolution_1 # f => convolution # g => sigmoid # mul => mul # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {}) triton_poi_fused_convolution_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 49) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(in_out_ptr1 + (x3), tmp5, xmask) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [f], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1)) # Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 7, 7), (196, 49, 7, 1)) buf1 = buf0; del buf0 # reuse buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [f, conv_transpose2d_1, g, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_convolution_mul_sigmoid_0.run(buf1, buf3, primals_2, primals_5, buf4, 784, grid=grid(784), stream=stream0) del primals_2 del primals_5 return (buf4, primals_1, primals_3, primals_4, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 49 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(in_out_ptr1 + x3, tmp5, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1)) buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 7, 7), (196, 49, 7, 1)) buf1 = buf0 del buf0 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_mul_sigmoid_0[grid(784)](buf1, buf3, primals_2, primals_5, buf4, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_5 return buf4, primals_1, primals_3, primals_4, buf1, buf3 class GatedConvTransposeNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1): super(GatedConvTransposeNew, self).__init__() self.layer_f = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding= output_padding, groups=groups) self.layer_g = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding= output_padding, groups=groups) def forward(self, input_0): primals_1 = self.layer_f.weight primals_2 = self.layer_f.bias primals_3 = self.layer_g.weight primals_5 = self.layer_g.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ClaraBing/ffjord
GatedConvTranspose
false
13,520
[ "MIT" ]
518
a97c34ff546a063316828f53bd041555e663428d
https://github.com/ClaraBing/ffjord/tree/a97c34ff546a063316828f53bd041555e663428d
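The fused kernel in the GatedConvTranspose record folds both bias additions and the sigmoid gate into a single pass over the output; the computation itself is just f(x) * sigmoid(g(x)) with two parallel transposed convolutions. A minimal sketch on the record's shapes:

import torch
import torch.nn as nn

f = nn.ConvTranspose2d(4, 4, kernel_size=4)  # feature branch (layer_f)
g = nn.ConvTranspose2d(4, 4, kernel_size=4)  # gate branch (layer_g)
x = torch.rand(4, 4, 4, 4)
out = f(x) * torch.sigmoid(g(x))
print(out.shape)  # torch.Size([4, 4, 7, 7]): (4 - 1) * 1 + 4 = 7 per spatial dim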
CategoricalSampler
import torch import torch.nn as nn class Sampler(nn.Module): """ args; logits: (batch, n_nodes) return; next_node: (batch, 1) TopKSampler <=> greedy; sample one with biggest probability CategoricalSampler <=> sampling; randomly sample one from possible distribution based on probability """ def __init__(self, n_samples=1, **kwargs): super().__init__(**kwargs) self.n_samples = n_samples class CategoricalSampler(Sampler): def forward(self, logits): return torch.multinomial(logits.exp(), self.n_samples) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = torch.ops.aten.multinomial.default(buf0, 1) del buf0 buf2 = buf1 del buf1 return buf2, class Sampler(nn.Module): """ args; logits: (batch, n_nodes) return; next_node: (batch, 1) TopKSampler <=> greedy; sample one with biggest probability CategoricalSampler <=> sampling; randomly sample one from possible distribution based on probability """ def __init__(self, n_samples=1, **kwargs): super().__init__(**kwargs) self.n_samples = n_samples class CategoricalSamplerNew(Sampler): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
TSLNIHAOGIT/VRP_DRL_MHA
CategoricalSampler
false
14,457
[ "MIT" ]
55
6a59918ffb815fbdab4d75cb78130fc638c64d69
https://github.com/TSLNIHAOGIT/VRP_DRL_MHA/tree/6a59918ffb815fbdab4d75cb78130fc638c64d69
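torch.multinomial draws indices in proportion to non-negative weights, which is why the CategoricalSampler record exponentiates its log-probability logits first. A minimal sketch of both sampling modes named in the docstring; the greedy line is an assumption about what a k=1 TopKSampler does, since its body is not shown in this section:

import torch

log_probs = torch.log_softmax(torch.randn(4, 4), dim=-1)     # (batch, n_nodes)
sampled = torch.multinomial(log_probs.exp(), num_samples=1)  # CategoricalSampler
greedy = torch.topk(log_probs, k=1, dim=-1).indices          # TopKSampler-style pick
print(sampled.shape, greedy.shape)  # torch.Size([4, 1]) torch.Size([4, 1])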
LastBlock
import torch import numpy as np import torch.nn as nn class BatchNormLayer(nn.Module): """Implements batch normalization layer.""" def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon =1e-05): """Initializes with basic settings. Args: channels: Number of channels of the input tensor. gamma: Whether the scale (weight) of the affine mapping is learnable. beta: Whether the center (bias) of the affine mapping is learnable. decay: Decay factor for moving average operations in this layer. epsilon: A value added to the denominator for numerical stability. """ super().__init__() self.bn = nn.BatchNorm2d(num_features=channels, affine=True, track_running_stats=True, momentum=1 - decay, eps=epsilon) self.bn.weight.requires_grad = gamma self.bn.bias.requires_grad = beta def forward(self, x): return self.bn(x) class LastBlock(nn.Module): """Implements the last block, which is a dense block.""" def __init__(self, in_channels, out_channels, use_wscale=False, wscale_gain=1.0, use_bn=False): super().__init__() self.fc = nn.Linear(in_features=in_channels, out_features= out_channels, bias=False) self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0 self.bn = BatchNormLayer(channels=out_channels ) if use_bn else nn.Identity() def forward(self, x): x = x.view(x.shape[0], -1) x = self.fc(x) * self.scale x = x.view(x.shape[0], x.shape[1], 1, 1) return self.bn(x).view(x.shape[0], x.shape[1]) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, primals_1 class BatchNormLayer(nn.Module): """Implements batch normalization layer.""" def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon =1e-05): """Initializes with basic settings. Args: channels: Number of channels of the input tensor. gamma: Whether the scale (weight) of the affine mapping is learnable. beta: Whether the center (bias) of the affine mapping is learnable. decay: Decay factor for moving average operations in this layer. epsilon: A value added to the denominator for numerical stability. """ super().__init__() self.bn = nn.BatchNorm2d(num_features=channels, affine=True, track_running_stats=True, momentum=1 - decay, eps=epsilon) self.bn.weight.requires_grad = gamma self.bn.bias.requires_grad = beta def forward(self, x): return self.bn(x) class LastBlockNew(nn.Module): """Implements the last block, which is a dense block.""" def __init__(self, in_channels, out_channels, use_wscale=False, wscale_gain=1.0, use_bn=False): super().__init__() self.fc = nn.Linear(in_features=in_channels, out_features= out_channels, bias=False) self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0 self.bn = BatchNormLayer(channels=out_channels ) if use_bn else nn.Identity() def forward(self, input_0): primals_1 = self.fc.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
thunguyenphuoc/idinvert_pytorch
LastBlock
false
13,133
[ "MIT" ]
0
bf8a81e75d193c22a05d9c4457907dc468389766
https://github.com/thunguyenphuoc/idinvert_pytorch/tree/bf8a81e75d193c22a05d9c4457907dc468389766
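With use_wscale=True, the LastBlock record applies the equalized-learning-rate trick: rather than baking gain / sqrt(fan_in) into the weights, it rescales the dense output on every forward pass. A minimal sketch of just that scaling, with illustrative sizes:

import numpy as np
import torch
import torch.nn as nn

in_ch, out_ch, gain = 4, 4, 1.0
fc = nn.Linear(in_ch, out_ch, bias=False)
scale = gain / np.sqrt(in_ch)   # 0.5 here; the record uses 1.0 when use_wscale=False
y = fc(torch.rand(4, in_ch)) * scale
print(y.shape)                  # torch.Size([4, 4])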
AgreementRouting
import torch import torch.nn as nn import torch.nn.functional as F def squash(x): lengths2 = x.pow(2).sum(dim=2) lengths = lengths2.sqrt() x = x * (lengths2 / (1 + lengths2) / lengths).view(x.size(0), x.size(1), 1) return x class AgreementRouting(nn.Module): def __init__(self, input_caps, output_caps, n_iterations): super(AgreementRouting, self).__init__() self.n_iterations = n_iterations self.b = nn.Parameter(torch.zeros((input_caps, output_caps))) def forward(self, u_predict): batch_size, input_caps, output_caps, _output_dim = u_predict.size() c = F.softmax(self.b) s = (c.unsqueeze(2) * u_predict).sum(dim=1) v = squash(s) if self.n_iterations > 0: b_batch = self.b.expand((batch_size, input_caps, output_caps)) for r in range(self.n_iterations): v = v.unsqueeze(1) b_batch = b_batch + (u_predict * v).sum(-1) c = F.softmax(b_batch.view(-1, output_caps)).view(-1, input_caps, output_caps, 1) s = (c * u_predict).sum(dim=1) v = squash(s) return v def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_caps': 4, 'output_caps': 4, 'n_iterations': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask) tmp3 = tl.load(in_ptr0 + (4 + x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask) tmp7 = tl.load(in_ptr0 + (8 + x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask) tmp11 = tl.load(in_ptr0 + (12 + x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_mul_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 
tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 + tmp12 tmp14 = tmp11 / tmp13 tmp15 = libdevice.sqrt(tmp11) tmp16 = tmp14 / tmp15 tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused_add_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x4 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x4), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr1 + (3 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr2 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp6 = tmp4 * tmp5 tmp7 = tmp3 + tmp6 tmp10 = tmp8 * tmp9 tmp11 = tmp7 + tmp10 tmp14 = tmp12 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = tmp0 + tmp15 tl.store(out_ptr0 + x4, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask) tmp3 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask) tmp7 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy= 
'evict_last') tmp8 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask) tmp11 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_add_mul_sum_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp6 = tmp4 * tmp5 tmp7 = tmp3 + tmp6 tmp10 = tmp8 * tmp9 tmp11 = tmp7 + tmp10 tmp14 = tmp12 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = tmp0 + tmp15 tl.store(in_out_ptr0 + x3, tmp16, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](primals_2, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(64)](buf1, primals_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_3[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused_add_mul_sum_4[grid(64)](primals_2, primals_1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0) del buf3 triton_poi_fused__softmax_5[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__softmax_6[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0) del buf5 triton_poi_fused_mul_sum_7[grid(64)](buf6, primals_1, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0) del buf6 triton_poi_fused_mul_3[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf4 del buf4 triton_poi_fused_add_mul_sum_8[grid(64)](buf9, primals_1, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0) del buf8 triton_poi_fused__softmax_5[grid(64)](buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 
triton_poi_fused__softmax_6[grid(64)](buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0) del buf10 triton_poi_fused_mul_sum_7[grid(64)](buf11, primals_1, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0) del buf11 triton_poi_fused_mul_3[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = buf9 del buf9 triton_poi_fused_add_mul_sum_8[grid(64)](buf14, primals_1, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0) del buf13 triton_poi_fused__softmax_5[grid(64)](buf14, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) buf16 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0) del buf12 triton_poi_fused__softmax_6[grid(64)](buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0) del buf15 triton_poi_fused_mul_sum_7[grid(64)](buf16, primals_1, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0) del buf16 triton_poi_fused_mul_3[grid(64)](buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf17 buf19 = buf14 del buf14 triton_poi_fused_add_mul_sum_8[grid(64)](buf19, primals_1, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) buf20 = reinterpret_tensor(buf18, (16, 4), (4, 1), 0) del buf18 triton_poi_fused__softmax_5[grid(64)](buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) buf21 = reinterpret_tensor(buf19, (16, 4), (4, 1), 0) del buf19 triton_poi_fused__softmax_6[grid(64)](buf20, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1) buf22 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0) del buf20 triton_poi_fused_mul_sum_7[grid(64)](buf21, primals_1, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) buf23 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0) del buf21 triton_poi_fused_mul_3[grid(64)](buf22, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf22 return buf23, primals_1, primals_2 def squash(x): lengths2 = x.pow(2).sum(dim=2) lengths = lengths2.sqrt() x = x * (lengths2 / (1 + lengths2) / lengths).view(x.size(0), x.size(1), 1) return x class AgreementRoutingNew(nn.Module): def __init__(self, input_caps, output_caps, n_iterations): super(AgreementRoutingNew, self).__init__() self.n_iterations = n_iterations self.b = nn.Parameter(torch.zeros((input_caps, output_caps))) def forward(self, input_0): primals_2 = self.b primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
bentrevett/capsules
AgreementRouting
false
3,237
[ "MIT" ]
0
239273de25c607d7a7504e8c6900772fddd15cd3
https://github.com/bentrevett/capsules/tree/239273de25c607d7a7504e8c6900772fddd15cd3
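The routing record leans on the squash nonlinearity, v = (|s|^2 / (1 + |s|^2)) * (s / |s|), which preserves each capsule's direction while compressing its norm into [0, 1). A minimal sketch checking that property on random capsules (dim=2 matches the record's capsule layout):

import torch

s = torch.randn(4, 4, 8)                 # (batch, capsules, capsule_dim)
sq = s.pow(2).sum(dim=2, keepdim=True)   # squared lengths
v = s * (sq / (1 + sq) / sq.sqrt())
print(v.norm(dim=2).max() < 1.0)         # tensor(True)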
SigmoidRange
from torch.nn import Module import functools import torch import torch.nn as nn from typing import * def sigmoid_range(x, low, high): """Sigmoid function with range `(low, high)`""" return torch.sigmoid(x) * (high - low) + low class PrePostInitMeta(type): """A metaclass that calls optional `__pre_init__` and `__post_init__` methods""" def __new__(cls, name, bases, dct): x = super().__new__(cls, name, bases, dct) def _pass(self, *args, **kwargs): pass for o in ('__init__', '__pre_init__', '__post_init__'): if not hasattr(x, o): setattr(x, o, _pass) old_init = x.__init__ @functools.wraps(old_init) def _init(self, *args, **kwargs): self.__pre_init__() old_init(self, *args, **kwargs) self.__post_init__() setattr(x, '__init__', _init) return x class Module(nn.Module, metaclass=PrePostInitMeta): """Same as `nn.Module`, but no need for subclasses to call `super().__init__`""" def __pre_init__(self): super().__init__() def __init__(self): pass class SigmoidRange(Module): """Sigmoid module with range `(low, high)`""" def __init__(self, low, high): self.low, self.high = low, high def forward(self, x): return sigmoid_range(x, self.low, self.high) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'low': 4, 'high': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import functools import torch.nn as nn from typing import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 0.0 tmp3 = tmp1 * tmp2 tmp4 = 4.0 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def sigmoid_range(x, low, high): """Sigmoid function with range `(low, high)`""" return torch.sigmoid(x) * (high - low) + low class PrePostInitMeta(type): """A metaclass that calls optional `__pre_init__` and `__post_init__` methods""" def __new__(cls, name, bases, dct): x = super().__new__(cls, name, bases, dct) def _pass(self, *args, **kwargs): pass for o in ('__init__', '__pre_init__', '__post_init__'): if not hasattr(x, o): setattr(x, o, _pass) old_init = x.__init__ @functools.wraps(old_init) def _init(self, *args, **kwargs): self.__pre_init__() old_init(self, *args, **kwargs) self.__post_init__() setattr(x, '__init__', _init) return x class Module(nn.Module, metaclass=PrePostInitMeta): """Same as `nn.Module`, but no need for subclasses to call `super().__init__`""" def __pre_init__(self): super().__init__() def __init__(self): pass class SigmoidRangeNew(Module): """Sigmoid module with range `(low, high)`""" def __init__(self, low, high): self.low, self.high = low, high def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
davidpfahler/fastai_dev
SigmoidRange
false
10,053
[ "Apache-2.0" ]
0
a86b15f86138a9902e8649e3f745e76a19139ab3
https://github.com/davidpfahler/fastai_dev/tree/a86b15f86138a9902e8649e3f745e76a19139ab3
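sigmoid_range simply rescales sigmoid's (0, 1) output into (low, high). Note that get_init_inputs() in the record uses low = high = 4, so high - low is 0 and the traced kernel constant-folds to sigmoid(x) * 0.0 + 4.0, a constant; with distinct bounds the function behaves as intended:

import torch

def sigmoid_range(x, low, high):
    return torch.sigmoid(x) * (high - low) + low

y = sigmoid_range(torch.randn(4, 4), low=-1.0, high=1.0)
print(y.min() > -1.0, y.max() < 1.0)  # tensor(True) tensor(True)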
NormLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/kr/ckrxqy227ju5oy4cidmyb35esvbq3qjjqbbpiqrdjo6j5hkwzhi4.py # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 4.00000001), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.249999999375 tmp4 
= tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.249999999375 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormLayerNew(nn.Module): def __init__(self, mean, std, n=None, eps=1e-08) ->None: super().__init__() self.mean = mean self.std = std self.eps = eps def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
LMdeLiangMi/captum
NormLayer
false
5,472
[ "BSD-3-Clause" ]
1
8bd9686013fe0ba8996e9b1cbeb0ea8e91512787
https://github.com/LMdeLiangMi/captum/tree/8bd9686013fe0ba8996e9b1cbeb0ea8e91512787
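The NormLayer record's fused kernel is a fixed-statistics standardisation, (x - mean) / (std + eps): the literal 0.249999999375 is 1 / (4 + 1e-08) folded at compile time for the traced mean = std = 4. A minimal sketch confirming the constant:

import torch

mean, std, eps = 4.0, 4.0, 1e-08
x = torch.rand(4, 4, 4, 4)
y = (x - mean) / (std + eps)
print(torch.allclose(y, (x - 4.0) * 0.249999999375))  # True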
LearnedUpsampling1d
import torch
from torch import nn


class LearnedUpsampling1d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, bias=True):
        super().__init__()
        self.conv_t = nn.ConvTranspose1d(in_channels=in_channels,
            out_channels=out_channels, kernel_size=kernel_size, stride=
            kernel_size, bias=False)
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_channels,
                kernel_size))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        self.conv_t.reset_parameters()
        # Use the in-place nn.init.constant_ (the non-underscore alias is
        # deprecated) and guard against bias=False, where self.bias is None.
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

    def forward(self, input):
        batch_size, _, length = input.size()
        kernel_size, = self.conv_t.kernel_size
        bias = self.bias.unsqueeze(0).unsqueeze(2).expand(batch_size, self.
            conv_t.out_channels, length, kernel_size).contiguous().view(
            batch_size, self.conv_t.out_channels, length * kernel_size)
        return self.conv_t(input) + bias


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (4 * x1 + x0 % 4), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(4,),
            padding=(0,), dilation=(1,), transposed=True, output_padding=(0
            ,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 16), (64, 16, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3


class LearnedUpsampling1dNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, bias=True):
        super().__init__()
        self.conv_t = nn.ConvTranspose1d(in_channels=in_channels,
            out_channels=out_channels, kernel_size=kernel_size, stride=
            kernel_size, bias=False)
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_channels,
                kernel_size))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        self.conv_t.reset_parameters()
        # Same fix as the reference module above: use nn.init.constant_ and
        # skip initialisation when the bias parameter was disabled.
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

    def forward(self, input_0):
        primals_2 = self.bias
        primals_1 = self.conv_t.weight
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
fdb/samplernn-pytorch
LearnedUpsampling1d
false
15,345
[ "MIT" ]
259
87ce71cc2cf26601a271648597f198df33059f96
https://github.com/fdb/samplernn-pytorch/tree/87ce71cc2cf26601a271648597f198df33059f96
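Because the LearnedUpsampling1d record sets stride equal to kernel_size, the transposed convolution upsamples the length by exactly kernel_size, and the (out_channels, kernel_size) bias is tiled along the upsampled axis so each of the kernel_size output phases gets its own learned offset. A minimal sketch of the forward computation:

import torch
import torch.nn as nn

conv_t = nn.ConvTranspose1d(4, 4, kernel_size=4, stride=4, bias=False)
bias = torch.zeros(4, 4)        # (out_channels, kernel_size), zeros after reset
x = torch.rand(4, 4, 4)         # (batch, channels, length)
out = conv_t(x) + bias.repeat(1, x.size(2)).unsqueeze(0)  # tile bias per phase
print(out.shape)                # torch.Size([4, 4, 16]) = length * kernel_size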
NaiveGroupNorm
from torch.nn import Module import torch from torch.nn import Parameter from torch.nn import init import torch.nn.parallel class NaiveGroupNorm(Module): """NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch. It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX. The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = NaiveGroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = NaiveGroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = NaiveGroupNorm(1, 6) >>> # Activating the module >>> output = m(input) .. _`Group Normalization`: https://arxiv.org/abs/1803.08494 """ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias'] def __init__(self, num_groups, num_channels, eps=1e-05, affine=True): super(NaiveGroupNorm, self).__init__() self.num_groups = num_groups self.num_channels = num_channels self.eps = eps self.affine = affine if self.affine: self.weight = Parameter(torch.Tensor(num_channels)) self.bias = Parameter(torch.Tensor(num_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input): N, C, H, W = input.size() assert C % self.num_groups == 0 input = input.reshape(N, self.num_groups, -1) mean = input.mean(dim=-1, keepdim=True) var = (input ** 2).mean(dim=-1, keepdim=True) - mean ** 2 std = torch.sqrt(var + self.eps) input = (input - mean) / std input = input.reshape(N, C, H, W) if self.affine: input = input * self.weight.reshape(1, C, 1, 1 ) + self.bias.reshape(1, C, 1, 1) return input def extra_repr(self): return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'. format(**self.__dict__)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_groups': 1, 'num_channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch.nn import Parameter from torch.nn import init import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp0 - tmp11 tmp19 = tmp18 / tmp17 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp17, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp23, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_mean_mul_pow_sqrt_sub_0[grid(4)](buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return buf4, primals_1, buf1, buf3 class NaiveGroupNormNew(Module): """NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch. It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX. The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. 
Shape: - Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = NaiveGroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = NaiveGroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = NaiveGroupNorm(1, 6) >>> # Activating the module >>> output = m(input) .. _`Group Normalization`: https://arxiv.org/abs/1803.08494 """ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias'] def __init__(self, num_groups, num_channels, eps=1e-05, affine=True): super(NaiveGroupNormNew, self).__init__() self.num_groups = num_groups self.num_channels = num_channels self.eps = eps self.affine = affine if self.affine: self.weight = Parameter(torch.Tensor(num_channels)) self.bias = Parameter(torch.Tensor(num_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def extra_repr(self): return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'. format(**self.__dict__)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
EllisHui/outOfRailWay
NaiveGroupNorm
false
427
[ "BSD-2-Clause" ]
0
e3bf9aaa18879bee5536740d55006c872f06278f
https://github.com/EllisHui/outOfRailWay/tree/e3bf9aaa18879bee5536740d55006c872f06278f
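Usage sketch (not from the source repo): a parity check against torch.nn.GroupNorm, assuming NaiveGroupNormNew from the blob above is in scope and CUDA is available. The fused kernel is traced for a (4, 4, 4, 4) input with a single group (one 64-element reduction per sample), so the sketch keeps that configuration; the tolerances are loose because the naive E[x^2] - E[x]^2 variance can differ from the reference in the last float32 bits.

import torch

# Hypothetical parity check; NaiveGroupNormNew is taken from the blob above.
ref = torch.nn.GroupNorm(num_groups=1, num_channels=4, eps=1e-05).cuda()
fused = NaiveGroupNormNew(num_groups=1, num_channels=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), ref(x), rtol=1e-4, atol=1e-4)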
QRLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nt/cntfuccf5likrxmjfhf7ci2kcrcvhvrhwcbyjbtnwc24lfr23xmz.py # Topologically Sorted Source Nodes: [q_bar], Original ATen: [aten.mean] # Source node to ATen node mapping: # q_bar => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0, 2, 3]), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = (rindex // 16) x0 = xindex tmp0 = 
tl.load(in_ptr0 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/76/c763g2dm4264ss6vfjeky27pd43x3xyr3liuvrqgc4kgai24lj7g.py # Topologically Sorted Source Nodes: [q_bar, log, mul, qbar_log_S], Original ATen: [aten.mean, aten.log, aten.mul, aten.sum] # Source node to ATen node mapping: # log => log # mul => mul # q_bar => mean # qbar_log_S => sum_1 # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0, 2, 3]), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mean,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, %log), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) triton_per_fused_log_mean_mul_sum_1 = async_compile.triton('triton_per_fused_log_mean_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log_mean_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_log_mean_mul_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 64.0 tmp2 = tmp0 / tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mz/cmzlu2lip25blpsdqeby7ek5757op6xw3pdkxbdediou5szw32tx.py # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone] # Source node to ATen node mapping: # einsum => clone # Graph fragment: # %clone : [num_users=1] = 
call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uc/cucdsxx4mqgwbexjohnr77euymo64cqsxlygdc6xijcomgyerfue.py # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone] # Source node to ATen node mapping: # einsum => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl_math.log(tmp0) tl.store(out_ptr0 + (x2 + (4*y3)), tmp1, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4q/c4qvkmdkhztaqsclmaqsdnjb34q22zutmp6srvwwwjydssjmtmyu.py # Topologically Sorted Source Nodes: [q_log_p, loss], Original ATen: [aten.mean, aten.sub] # Source node to ATen node mapping: # loss => sub # q_log_p => mean_1 # Graph fragment: # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%view_3,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %mean_1), kwargs = {}) triton_per_fused_mean_sub_4 = async_compile.triton('triton_per_fused_mean_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_sub_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_sub_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, 
RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp4 = tl.load(in_out_ptr0 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp6 = 64.0 tmp7 = tmp3 / tmp6 tmp8 = tmp5 - tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [q_bar], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(arg0_1, buf0, 4, 64, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [q_bar, log, mul, qbar_log_S], Original ATen: [aten.mean, aten.log, aten.mul, aten.sum] triton_per_fused_log_mean_mul_sum_1.run(buf0, buf1, 1, 4, grid=grid(1), stream=stream0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(arg0_1, buf2, 64, 4, grid=grid(64, 4), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(arg1_1, buf3, 64, 4, grid=grid(64, 4), stream=stream0) del arg1_1 buf4 = empty_strided_cuda((64, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (64, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4) del buf2 del buf3 buf6 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [q_log_p, loss], Original ATen: [aten.mean, aten.sub] triton_per_fused_mean_sub_4.run(buf6, buf4, 1, 64, grid=grid(1), stream=stream0) del buf4 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn.modules import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = rindex // 16 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_per_fused_log_mean_mul_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 64.0 tmp2 = tmp0 / tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl_math.log(tmp0) tl.store(out_ptr0 + (x2 + 4 * y3), tmp1, xmask & ymask) @triton.jit def triton_per_fused_mean_sub_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_out_ptr0 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp6 = 64.0 tmp7 = tmp3 / tmp6 tmp8 = tmp5 - tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mean_0[grid(4)](arg0_1, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((), (), torch.float32) triton_per_fused_log_mean_mul_sum_1[grid(1)](buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(64, 4)](arg0_1, buf2, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(64, 4)](arg1_1, buf3, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg1_1 buf4 = empty_strided_cuda((64, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (64, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4) del buf2 del buf3 buf6 = buf1 del buf1 triton_per_fused_mean_sub_4[grid(1)](buf6, buf4, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf4 return buf6, class QRLossNew(Module): """The QR (forward) loss between class probabilities and predictions. This loss is defined in `'Resolving label uncertainty with implicit generative models' <https://openreview.net/forum?id=AEa_UepnMDX>`_. .. versionadded:: 0.2 """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ethanwhite/torchgeo
QRLoss
false
15,313
[ "MIT" ]
678
cb20e1abfd9213f9ee7700df972385db13568642
https://github.com/ethanwhite/torchgeo/tree/cb20e1abfd9213f9ee7700df972385db13568642
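Usage sketch (not from the source repo): an eager re-derivation of the fused graph, assuming QRLossNew from the blob above is in scope and CUDA is available. The traced loss is the negative entropy of the class marginal (probs averaged over batch and space) minus the mean per-pixel dot product between probs and log(target), for (batch, class, height, width) inputs of shape (4, 4, 4, 4).

import torch

# Hypothetical eager reference for the fused graph; QRLossNew is taken from the blob above.
def qr_loss_ref(probs, target):
    q_bar = probs.mean(dim=(0, 2, 3))         # marginal class distribution
    qbar_log_s = (q_bar * q_bar.log()).sum()  # negative entropy of the marginal
    q_log_p = torch.einsum('bcxy,bcxy->bxy', probs, target.log()).mean()
    return qbar_log_s - q_log_p

probs = torch.rand(4, 4, 4, 4, device='cuda').softmax(dim=1)
target = torch.rand(4, 4, 4, 4, device='cuda').softmax(dim=1)
torch.testing.assert_close(QRLossNew()(probs, target),
                           qr_loss_ref(probs, target), rtol=1e-4, atol=1e-5)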
DeepSVDDLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/yz/cyzzwmigglflirqvx3z34vnjywtdaenzs4wt4dm747p5wxvo6org.py # Topologically Sorted Source Nodes: [sub, pow_1, dist, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mean] # Source node to ATen node mapping: # dist => sum_1 # loss => mean # pow_1 => pow_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {}) triton_per_fused_mean_pow_sub_sum_0 = async_compile.triton('triton_per_fused_mean_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp4 - tmp1 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp9 = tmp8 - tmp1 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12 - tmp1 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 64.0 tmp20 = tmp18 / tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, dist, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_pow_sub_sum_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from functools import reduce
from operator import mul
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mean_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp8 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp12 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp1 = 4.0
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 - tmp1
    tmp6 = tmp5 * tmp5
    tmp7 = tmp3 + tmp6
    tmp9 = tmp8 - tmp1
    tmp10 = tmp9 * tmp9
    tmp11 = tmp7 + tmp10
    tmp13 = tmp12 - tmp1
    tmp14 = tmp13 * tmp13
    tmp15 = tmp11 + tmp14
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.sum(tmp16, 1)[:, None]
    tmp19 = 64.0
    tmp20 = tmp18 / tmp19
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_pow_sub_sum_0[grid(1)](buf1, arg0_1, 1, 64,
            XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class BaseModule(nn.Module):
    """
    Implements the basic module.
    All other modules inherit from this one
    """

    def load_w(self, checkpoint_path):
        """
        Loads a checkpoint into the state_dict.

        :param checkpoint_path: the checkpoint file to be loaded.
        """
        device = torch.device('cuda:' + '1')
        self.load_state_dict(torch.load(checkpoint_path, map_location=device))

    def __repr__(self):
        """
        String representation
        """
        good_old = super(BaseModule, self).__repr__()
        addition = 'Total number of parameters: {:,}'.format(self.n_parameters)
        return good_old + '\n' + addition

    def __call__(self, *args, **kwargs):
        return super(BaseModule, self).__call__(*args, **kwargs)

    @property
    def n_parameters(self):
        """
        Number of parameters of the model.
        """
        n_parameters = 0
        for p in self.parameters():
            if hasattr(p, 'mask'):
                n_parameters += torch.sum(p.mask).item()
            else:
                # requires `from operator import mul` (added above); without it
                # this line raises a NameError whenever n_parameters is accessed
                n_parameters += reduce(mul, p.shape)
        return int(n_parameters)


class DeepSVDDLossNew(BaseModule):
    """
    Implements the reconstruction loss.
    """

    def __init__(self, c, R, nu, objective):
        """
        Class constructor.
        """
        super(DeepSVDDLossNew, self).__init__()
        self.c = c
        self.R = R
        self.nu = nu
        self.objective = objective

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
NjuHaoZhang/AutoregressModel-AE_VAD_CVPR2019
DeepSVDDLoss
false
8,595
[ "MIT" ]
12
b9843f34ecb59f908d78ddf977ee4670e0ed6cb4
https://github.com/NjuHaoZhang/AutoregressModel-AE_VAD_CVPR2019/tree/b9843f34ecb59f908d78ddf977ee4670e0ed6cb4
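Usage sketch (not from the source repo): the fused kernel sums (z - c)^2 over the channel dimension and averages over batch and spatial positions, with the centre c = 4 baked in as a trace-time constant. Assuming DeepSVDDLossNew from the blob above is in scope; the R, nu and objective values below are placeholders, stored on the module but never read by this traced forward.

import torch

# Hypothetical eager reference; DeepSVDDLossNew is taken from the blob above.
loss_fn = DeepSVDDLossNew(c=4.0, R=0.0, nu=0.1, objective='one-class')  # placeholders except c
z = torch.rand(4, 4, 4, 4, device='cuda')
ref = ((z - 4.0) ** 2).sum(dim=1).mean()  # squared distance to the centre, averaged
torch.testing.assert_close(loss_fn(z), ref, rtol=1e-5, atol=1e-5)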
NLKProjection
import torch from torch import nn import torch.nn.functional as F class TwoLayerNet(nn.Module): def __init__(self, dim, hidden_dim, output_dim): super(TwoLayerNet, self).__init__() self.layer1 = nn.Linear(dim, hidden_dim) self.layer2 = nn.Linear(hidden_dim, output_dim) nn.init.xavier_uniform_(self.layer1.weight) nn.init.xavier_uniform_(self.layer2.weight) def forward(self, emb): return self.layer2(F.relu(self.layer1(emb))) class NLKProjection(nn.Module): def __init__(self, dim, hidden_dim, group_num): super(NLKProjection, self).__init__() self.dim, self.hidden_dim, self.concat_dim = (dim, hidden_dim, 2 * dim + group_num) self.MLP1 = TwoLayerNet(dim, hidden_dim, dim) self.MLP2 = TwoLayerNet(dim, hidden_dim, dim) self.MLP3 = TwoLayerNet(self.concat_dim, hidden_dim, dim) self.MLP4 = TwoLayerNet(self.concat_dim, hidden_dim, dim) def forward(self, origin_center, origin_offset, x_new): z1 = self.MLP1(origin_center) z2 = self.MLP2(origin_offset) final_input = torch.cat([z1, z2, x_new], dim=-1) new_offset = self.MLP3(final_input) new_center = self.MLP4(final_input) return torch.cat([new_center, new_offset, x_new], dim=-1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'hidden_dim': 4, 'group_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_12, (4, 12), (12, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 12), (12, 1)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4, 4), (4, 1)) assert_size_stride(primals_19, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf17 = empty_strided_cuda((4, 4, 4, 4), 
(64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf4, primals_7, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_10 buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch. float32) triton_poi_fused_cat_1[grid(768)](buf2, buf5, primals_11, buf6, 768, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 extern_kernels.mm(reinterpret_tensor(buf6, (64, 12), (12, 1), 0), reinterpret_tensor(primals_12, (12, 4), (1, 12), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf8, primals_13, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_13 buf9 = buf2 del buf2 extern_kernels.addmm(primals_15, reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_15 buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (64, 12), (12, 1), 0), reinterpret_tensor(primals_16, (12, 4), (1, 12), 0), out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf10 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf11, primals_17, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_17 buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_19, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_19 buf13 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch. 
float32) triton_poi_fused_cat_1[grid(768)](buf12, buf9, primals_11, buf13, 768, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del buf9 del primals_11 return (buf13, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( primals_8, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf6, (64, 12), (12, 1), 0), reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor( buf11, (64, 4), (4, 1), 0), primals_18, buf14, primals_16, primals_14, buf15, primals_12, primals_9, buf16, primals_4, buf17) class TwoLayerNet(nn.Module): def __init__(self, dim, hidden_dim, output_dim): super(TwoLayerNet, self).__init__() self.layer1 = nn.Linear(dim, hidden_dim) self.layer2 = nn.Linear(hidden_dim, output_dim) nn.init.xavier_uniform_(self.layer1.weight) nn.init.xavier_uniform_(self.layer2.weight) def forward(self, emb): return self.layer2(F.relu(self.layer1(emb))) class NLKProjectionNew(nn.Module): def __init__(self, dim, hidden_dim, group_num): super(NLKProjectionNew, self).__init__() self.dim, self.hidden_dim, self.concat_dim = (dim, hidden_dim, 2 * dim + group_num) self.MLP1 = TwoLayerNet(dim, hidden_dim, dim) self.MLP2 = TwoLayerNet(dim, hidden_dim, dim) self.MLP3 = TwoLayerNet(self.concat_dim, hidden_dim, dim) self.MLP4 = TwoLayerNet(self.concat_dim, hidden_dim, dim) def forward(self, input_0, input_1, input_2): primals_1 = self.MLP1.layer1.weight primals_2 = self.MLP1.layer1.bias primals_4 = self.MLP1.layer2.weight primals_5 = self.MLP1.layer2.bias primals_6 = self.MLP2.layer1.weight primals_7 = self.MLP2.layer1.bias primals_9 = self.MLP2.layer2.weight primals_10 = self.MLP2.layer2.bias primals_12 = self.MLP3.layer1.weight primals_13 = self.MLP3.layer1.bias primals_14 = self.MLP3.layer2.weight primals_15 = self.MLP3.layer2.bias primals_16 = self.MLP4.layer1.weight primals_17 = self.MLP4.layer1.bias primals_18 = self.MLP4.layer2.weight primals_19 = self.MLP4.layer2.bias primals_3 = input_0 primals_8 = input_1 primals_11 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return output[0]
HKUST-KnowComp/EFO-1-QA-benchmark
NLKProjection
false
17,357
[ "MIT" ]
9
600fb02c76ab631f93ee362ceb789216ec085790
https://github.com/HKUST-KnowComp/EFO-1-QA-benchmark/tree/600fb02c76ab631f93ee362ceb789216ec085790
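Usage sketch (not from the source repo): a minimal smoke test, assuming NLKProjectionNew (and its TwoLayerNet helper) from the blob above are in scope and CUDA is available. The module projects the centre and offset through MLP1/MLP2, concatenates them with x_new, derives a new offset and centre through MLP3/MLP4, and returns new_center, new_offset and x_new concatenated along the last dimension (dim + dim + group_num = 12 here).

import torch

# Hypothetical smoke test; NLKProjectionNew is taken from the blob above.
proj = NLKProjectionNew(dim=4, hidden_dim=4, group_num=4).cuda()
center = torch.rand(4, 4, 4, 4, device='cuda')
offset = torch.rand(4, 4, 4, 4, device='cuda')
x_new = torch.rand(4, 4, 4, 4, device='cuda')
out = proj(center, offset, x_new)
assert out.shape == (4, 4, 4, 12)  # new_center, new_offset and x_new on the last dim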
UnpoolAvgEquiangular
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/iq/ciqv7bi5u5n5lklll775de7zs2dribgtfk5mjtugtmgprxav7lxa.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._unsafe_index, aten.clone] # Source node to ATen node mapping: # x_1 => _unsafe_index, clone # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%permute, [None, None, %unsqueeze, %convert_element_type_3]), kwargs = {}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%_unsafe_index,), kwargs = {memory_format: torch.channels_last}) triton_poi_fused__unsafe_index_clone_0 = async_compile.triton('triton_poi_fused__unsafe_index_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 32) % 2 x1 = (xindex // 4) % 8 x0 = xindex % 4 x3 = (xindex // 64) x5 = xindex tmp0 = x2 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x1 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (x0 + (4*tmp8) + (16*x3)), xmask) tl.store(out_ptr0 + (x5), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 8), (64, 1, 32, 4), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._unsafe_index, aten.clone] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_index_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 32 % 2 x1 = xindex // 4 % 8 x0 = xindex % 4 x3 = xindex // 64 x5 = xindex tmp0 = x2 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp3.to(tl.int32) tmp5 = x1 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (x0 + 4 * tmp8 + 16 * x3), xmask) tl.store(out_ptr0 + x5, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 8), (64, 1, 32, 4), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), def equiangular_dimension_unpack(nodes, ratio): """Calculate the two underlying dimensions from the total number of nodes Args: nodes (int): combined dimensions ratio (float): ratio between the two dimensions Returns: int, int: separated dimensions """ dim1 = int((nodes / ratio) ** 0.5) dim2 = int((nodes * ratio) ** 0.5) if dim1 * dim2 != nodes: if nodes % dim1 == 0: dim2 = nodes // dim1 if nodes % dim2 == 0: dim1 = nodes // dim2 assert dim1 * dim2 == nodes, f'Unable to unpack nodes: {nodes}, ratio: {ratio}' return dim1, dim2 def equiangular_calculator(tensor, ratio): N, M, F = tensor.size() dim1, dim2 = equiangular_dimension_unpack(M, ratio) tensor = tensor.view(N, dim1, dim2, F) return tensor def reformat(x): """Reformat the input from a 4D tensor to a 3D tensor Args: x (:obj:`torch.tensor`): a 4D tensor Returns: :obj:`torch.tensor`: a 3D tensor """ x = x.permute(0, 2, 3, 1) N, D1, D2, Feat = x.size() x = x.view(N, D1 * D2, Feat) return x class UnpoolAvgEquiangularNew(torch.nn.Module): """EquiAngular average unpooling Parameters ---------- ratio : float Parameter for equiangular sampling -> width/height """ def __init__(self, ratio, kernel_size, *args, **kwargs): self.ratio = ratio self.kernel_size = int(kernel_size ** 0.5) super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ownzonefeng/weather_prediction
UnpoolAvgEquiangular
false
7,426
[ "MIT" ]
1
723c02b6b3c0a40751d87572b66c7a4e040dec92
https://github.com/ownzonefeng/weather_prediction/tree/723c02b6b3c0a40751d87572b66c7a4e040dec92
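Usage sketch (not from the source repo): a minimal smoke test, assuming UnpoolAvgEquiangularNew from the blob above is in scope and CUDA is available. The compiled path ignores the constructor arguments at run time (the shapes were baked in when tracing); the ratio=4, kernel_size=4 pair below is consistent with the traced 1 x 4 node grid being replicated onto a 2 x 8 grid.

import torch

# Hypothetical smoke test; UnpoolAvgEquiangularNew is taken from the blob above.
unpool = UnpoolAvgEquiangularNew(ratio=4, kernel_size=4)
x = torch.rand(4, 4, 4, device='cuda')  # (batch, nodes, features), the traced shape
y = unpool(x)
assert y.shape == (4, 16, 4)  # each input node repeated over the upsampled grid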
MinimaxDiscriminatorLoss
import torch
import torch.nn as nn
import torch.nn.functional as F


def minimax_discriminator_loss(dx, dgz, label_smoothing=0.0, reduction='mean'):
    target_ones = torch.ones_like(dgz) * (1.0 - label_smoothing)
    target_zeros = torch.zeros_like(dx)
    loss = F.binary_cross_entropy_with_logits(dx, target_ones, reduction=reduction)
    loss += F.binary_cross_entropy_with_logits(dgz, target_zeros, reduction=reduction)
    return loss


class DiscriminatorLoss(nn.Module):
    """Base class for all discriminator losses.

    .. note:: All losses meant to be minimized for optimizing the Discriminator must subclass this.

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged
            over batch size. If ``sum`` the elements of the output are summed.
        override_train_ops (function, optional): Function to be used in place of the
            default ``train_ops``.
    """

    def __init__(self, reduction='mean', override_train_ops=None):
        super(DiscriminatorLoss, self).__init__()
        self.reduction = reduction
        self.override_train_ops = override_train_ops
        self.arg_map = {}

    def set_arg_map(self, value):
        """Updates the ``arg_map`` for passing a different value to the ``train_ops``.

        Args:
            value (dict): A mapping of the ``argument name`` in the method signature to the
                variable name in the ``Trainer`` it corresponds to.

        .. note::
            If the ``train_ops`` signature is
            ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)``
            then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
            In this case we make the following function call:
            ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
        """
        self.arg_map.update(value)

    def train_ops(self, generator, discriminator, optimizer_discriminator,
                  real_inputs, device, labels=None):
        """Defines the standard ``train_ops`` used by most losses. Losses which have a
        different training procedure can either ``subclass`` it **(recommended approach)**
        or make use of the ``override_train_ops`` argument.

        The ``standard optimization algorithm`` for the ``discriminator`` defined in this
        train_ops is as follows:

        1. :math:`fake = generator(noise)`
        2. :math:`value_1 = discriminator(fake)`
        3. :math:`value_2 = discriminator(real)`
        4. :math:`loss = loss\\_function(value_1, value_2)`
        5. Backpropagate by computing :math:`\\nabla loss`
        6. Run a step of the optimizer for the discriminator

        Args:
            generator (torchgan.models.Generator): The model to be optimized.
            discriminator (torchgan.models.Discriminator): The discriminator which judges
                the performance of the generator.
            optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the
                ``parameters`` of the ``discriminator``.
            real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
            device (torch.device): Device on which the ``generator`` and ``discriminator``
                are present.
            batch_size (int): Batch size of the data inferred from the ``DataLoader`` by
                the ``Trainer``.
            labels (torch.Tensor, optional): Labels for the data.

        Returns:
            Scalar value of the loss.
        """
        if self.override_train_ops is not None:
            return self.override_train_ops(self, generator, discriminator,
                optimizer_discriminator, real_inputs, device, labels)
        else:
            if labels is None and (generator.label_type == 'required' or
                    discriminator.label_type == 'required'):
                raise Exception('GAN model requires labels for training')
            batch_size = real_inputs.size(0)
            noise = torch.randn(batch_size, generator.encoding_dims, device=device)
            if generator.label_type == 'generated':
                label_gen = torch.randint(0, generator.num_classes,
                    (batch_size,), device=device)
            optimizer_discriminator.zero_grad()
            if discriminator.label_type == 'none':
                dx = discriminator(real_inputs)
            elif discriminator.label_type == 'required':
                dx = discriminator(real_inputs, labels)
            else:
                dx = discriminator(real_inputs, label_gen)
            if generator.label_type == 'none':
                fake = generator(noise)
            elif generator.label_type == 'required':
                fake = generator(noise, labels)
            else:
                fake = generator(noise, label_gen)
            if discriminator.label_type == 'none':
                dgz = discriminator(fake.detach())
            elif generator.label_type == 'generated':
                dgz = discriminator(fake.detach(), label_gen)
            else:
                dgz = discriminator(fake.detach(), labels)
            loss = self.forward(dx, dgz)
            loss.backward()
            optimizer_discriminator.step()
            return loss.item()


class MinimaxDiscriminatorLoss(DiscriminatorLoss):
    """Minimax game discriminator loss from the original GAN paper
    `"Generative Adversarial Networks" by Goodfellow et al. <https://arxiv.org/abs/1406.2661>`_

    The loss can be described as:

    .. math:: L(D) = -[log(D(x)) + log(1 - D(G(z)))]

    where

    - :math:`G` : Generator
    - :math:`D` : Discriminator
    - :math:`x` : A sample from the data distribution
    - :math:`z` : A sample from the noise prior

    Args:
        label_smoothing (float, optional): The factor by which the labels (1 in this case)
            need to be smoothed. For example, label_smoothing = 0.2 changes the value of
            the real labels to 0.8.
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the mean of the output
            is taken. If ``sum`` the elements of the output will be summed.
        override_train_ops (function, optional): Function to be used in place of the
            default ``train_ops``.
    """

    def __init__(self, label_smoothing=0.0, reduction='mean', override_train_ops=None):
        super(MinimaxDiscriminatorLoss, self).__init__(reduction, override_train_ops)
        self.label_smoothing = label_smoothing

    def forward(self, dx, dgz):
        """Computes the loss for the given input.

        Args:
            dx (torch.Tensor): Output of the Discriminator with real data. It must have
                the dimensions (N, \\*) where \\* means any number of additional dimensions.
            dgz (torch.Tensor): Output of the Discriminator with generated data. It must
                have the dimensions (N, \\*) where \\* means any number of additional
                dimensions.

        Returns:
            scalar if reduction is applied else Tensor with dimensions (N, \\*).
        """
        return minimax_discriminator_loss(dx, dgz,
            label_smoothing=self.label_smoothing, reduction=self.reduction)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp13 = tl.load(in_ptr1 + r0, None)
    tmp1 = 0.0
    tmp2 = tmp1 * tmp0
    tmp3 = triton_helpers.minimum(tmp1, tmp0)
    tmp4 = tl_math.abs(tmp0)
    tmp5 = -tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = libdevice.log1p(tmp6)
    tmp8 = tmp3 - tmp7
    tmp9 = tmp2 - tmp8
    tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp14 = triton_helpers.minimum(tmp1, tmp13)
    tmp15 = tl_math.abs(tmp13)
    tmp16 = -tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = libdevice.log1p(tmp17)
    tmp19 = tmp14 - tmp18
    tmp20 = tmp13 - tmp19
    tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
    tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
    tmp24 = 256.0
    tmp25 = tmp12 / tmp24
    tmp26 = tmp23 / tmp24
    tmp27 = tmp25 + tmp26
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_binary_cross_entropy_with_logits_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


def minimax_discriminator_loss(dx, dgz, label_smoothing=0.0, reduction='mean'):
    target_ones = torch.ones_like(dgz) * (1.0 - label_smoothing)
    target_zeros = torch.zeros_like(dx)
    loss = F.binary_cross_entropy_with_logits(dx, target_ones, reduction=reduction)
    loss += F.binary_cross_entropy_with_logits(dgz, target_zeros, reduction=reduction)
    return loss


class DiscriminatorLoss(nn.Module):
    """Base class for all discriminator losses.

    .. note:: All losses meant to be minimized for optimizing the Discriminator must subclass this.

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged
            over batch size. If ``sum`` the elements of the output are summed.
        override_train_ops (function, optional): Function to be used in place of the
            default ``train_ops``.
    """

    def __init__(self, reduction='mean', override_train_ops=None):
        super(DiscriminatorLoss, self).__init__()
        self.reduction = reduction
        self.override_train_ops = override_train_ops
        self.arg_map = {}

    def set_arg_map(self, value):
        """Updates the ``arg_map`` for passing a different value to the ``train_ops``.

        Args:
            value (dict): A mapping of the ``argument name`` in the method signature to the
                variable name in the ``Trainer`` it corresponds to.

        .. note::
            If the ``train_ops`` signature is
            ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)``
            then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
            In this case we make the following function call:
            ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
        """
        self.arg_map.update(value)

    def train_ops(self, generator, discriminator, optimizer_discriminator,
                  real_inputs, device, labels=None):
        """Defines the standard ``train_ops`` used by most losses. Losses which have a
        different training procedure can either ``subclass`` it **(recommended approach)**
        or make use of the ``override_train_ops`` argument.

        The ``standard optimization algorithm`` for the ``discriminator`` defined in this
        train_ops is as follows:

        1. :math:`fake = generator(noise)`
        2. :math:`value_1 = discriminator(fake)`
        3. :math:`value_2 = discriminator(real)`
        4. :math:`loss = loss\\_function(value_1, value_2)`
        5. Backpropagate by computing :math:`\\nabla loss`
        6. Run a step of the optimizer for the discriminator

        Args:
            generator (torchgan.models.Generator): The model to be optimized.
            discriminator (torchgan.models.Discriminator): The discriminator which judges
                the performance of the generator.
            optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the
                ``parameters`` of the ``discriminator``.
            real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
            device (torch.device): Device on which the ``generator`` and ``discriminator``
                are present.
            batch_size (int): Batch size of the data inferred from the ``DataLoader`` by
                the ``Trainer``.
            labels (torch.Tensor, optional): Labels for the data.

        Returns:
            Scalar value of the loss.
        """
        if self.override_train_ops is not None:
            return self.override_train_ops(self, generator, discriminator,
                optimizer_discriminator, real_inputs, device, labels)
        else:
            if labels is None and (generator.label_type == 'required' or
                    discriminator.label_type == 'required'):
                raise Exception('GAN model requires labels for training')
            batch_size = real_inputs.size(0)
            noise = torch.randn(batch_size, generator.encoding_dims, device=device)
            if generator.label_type == 'generated':
                label_gen = torch.randint(0, generator.num_classes,
                    (batch_size,), device=device)
            optimizer_discriminator.zero_grad()
            if discriminator.label_type == 'none':
                dx = discriminator(real_inputs)
            elif discriminator.label_type == 'required':
                dx = discriminator(real_inputs, labels)
            else:
                dx = discriminator(real_inputs, label_gen)
            if generator.label_type == 'none':
                fake = generator(noise)
            elif generator.label_type == 'required':
                fake = generator(noise, labels)
            else:
                fake = generator(noise, label_gen)
            if discriminator.label_type == 'none':
                dgz = discriminator(fake.detach())
            elif generator.label_type == 'generated':
                dgz = discriminator(fake.detach(), label_gen)
            else:
                dgz = discriminator(fake.detach(), labels)
            loss = self.forward(dx, dgz)
            loss.backward()
            optimizer_discriminator.step()
            return loss.item()


class MinimaxDiscriminatorLossNew(DiscriminatorLoss):
    """Minimax game discriminator loss from the original GAN paper
    `"Generative Adversarial Networks" by Goodfellow et al. <https://arxiv.org/abs/1406.2661>`_

    The loss can be described as:

    .. math:: L(D) = -[log(D(x)) + log(1 - D(G(z)))]

    where

    - :math:`G` : Generator
    - :math:`D` : Discriminator
    - :math:`x` : A sample from the data distribution
    - :math:`z` : A sample from the noise prior

    Args:
        label_smoothing (float, optional): The factor by which the labels (1 in this case)
            need to be smoothed. For example, label_smoothing = 0.2 changes the value of
            the real labels to 0.8.
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the mean of the output
            is taken. If ``sum`` the elements of the output will be summed.
        override_train_ops (function, optional): Function to be used in place of the
            default ``train_ops``.
    """

    def __init__(self, label_smoothing=0.0, reduction='mean', override_train_ops=None):
        super(MinimaxDiscriminatorLossNew, self).__init__(reduction, override_train_ops)
        self.label_smoothing = label_smoothing

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
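An equivalence check for the fused kernel (my addition, not from the repo): with default label smoothing, the single persistent reduction computes the mean BCE of D(x) against ones plus the mean BCE of D(G(z)) against zeros, matching the eager `minimax_discriminator_loss` above.

import torch
import torch.nn.functional as F

dx = torch.randn(4, 4, 4, 4, device='cuda')
dgz = torch.randn(4, 4, 4, 4, device='cuda')
loss_fused = MinimaxDiscriminatorLossNew()(dx, dgz)
loss_ref = (F.binary_cross_entropy_with_logits(dx, torch.ones_like(dx)) +
            F.binary_cross_entropy_with_logits(dgz, torch.zeros_like(dgz)))
assert torch.allclose(loss_fused, loss_ref, atol=1e-6)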
torchgan/torchgan
MinimaxDiscriminatorLoss
false
16,609
[ "MIT" ]
1,300
f4139537ac2d3d8609d5aecc859a6fb797b107a1
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
PolicyNetwork
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_1/inductor_cache/cn/ccnvkf7kfnskbbfy2kwx55oghjftngamwdttghryrfs4g3fay72l.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/gr/cgrekmp3vmfaskaxgm7wq3mhpcpisrqek37fdagqcpd5rqlnxvep.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   x_2 => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 8
    x2 = (xindex // 32)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/vv/cvvhapfmnf3saoxluehcmlbqma5latd76fq263yvdkloxowagavl.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   x_2 => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 8
    x2 = (xindex // 32)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (256, 4), (4, 1))
    assert_size_stride(primals_2, (256, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (256, 256), (256, 1))
    assert_size_stride(primals_5, (256, ), (1, ))
    assert_size_stride(primals_6, (2, 256), (256, 1))
    assert_size_stride(primals_7, (2, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0  # reuse
        buf8 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 16384, grid=grid(16384), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf2  # reuse
        buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 16384, grid=grid(16384), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 2), (1, 256), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_1.run(buf4, buf5, 128, grid=grid(128), stream=stream0)
        buf6 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf4  # reuse
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_2.run(buf5, buf6, 128, grid=grid(128), stream=stream0)
        del buf5
    return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((2, 256), (256, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 8
    x2 = xindex // 32
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 8
    x2 = xindex // 32
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (256, 4), (4, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (256, 256), (256, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (2, 256), (256, 1))
    assert_size_stride(primals_7, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
        del buf0
        buf8 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf8, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
        del buf2
        buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3, primals_5, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 2), (1, 256), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(128)](buf4, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1)
        buf6 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0)
        del buf4
        triton_poi_fused__softmax_2[grid(128)](buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf5
    return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
        reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
        buf6, primals_6, buf7, primals_4, buf8)


class PolicyNetworkNew(nn.Module):

    def __init__(self):
        super(PolicyNetworkNew, self).__init__()
        self.fc1 = nn.Linear(4, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 2)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
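A usage sketch (my addition, not from the repo): the traced graph folds all leading dimensions into the matmul batch and, following the trace above (`amax`/`sum` over `[1]`), normalizes the softmax over dim=1 of the 4D input; the kernels were tuned for the (4, 4, 4, 4) case.

import torch

policy = PolicyNetworkNew().cuda()
state = torch.rand(4, 4, 4, 4, device='cuda')  # 4-dimensional states, CartPole-style
probs = policy(state)                          # shape (4, 4, 4, 2)
assert torch.allclose(probs.sum(dim=1), torch.ones(4, 4, 2, device='cuda'), atol=1e-6)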
DensoITLab/spinningup_in_pytorch
PolicyNetwork
false
7,955
[ "MIT" ]
11
612d8c4c6593c8c5ecb5a939bf43085daac9e552
https://github.com/DensoITLab/spinningup_in_pytorch/tree/612d8c4c6593c8c5ecb5a939bf43085daac9e552
WeightedBinaryCrossEntropyLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/g7/cg7e5goodui2hzv2flznm2xjbqtugobnnulk2o7g2rtwmrt67pkf.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul]
# Source node to ATen node mapping:
#   binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
#   loss => mul_1
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %arg2_1), kwargs = {})
triton_poi_fused_binary_cross_entropy_with_logits_mul_0 = async_compile.triton('triton_poi_fused_binary_cross_entropy_with_logits_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_binary_cross_entropy_with_logits_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp3 = tl.load(in_ptr1 + (x0), xmask)
    tmp13 = tl.load(in_ptr2 + (x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp14 = tmp12 * tmp13
    tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_binary_cross_entropy_with_logits_mul_0.run(arg1_1, arg0_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
        del arg2_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1, arg2_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.autograd

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask)
    tmp13 = tl.load(in_ptr2 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp14 = tmp12 * tmp13
    tl.store(out_ptr0 + x0, tmp14, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_binary_cross_entropy_with_logits_mul_0[grid(256)](
            arg1_1, arg0_1, arg2_1, buf0, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf0,


class WeightedBinaryCrossEntropyLossNew(nn.Module):

    def __init__(self):
        super(WeightedBinaryCrossEntropyLossNew, self).__init__()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
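An equivalence check (my addition, not from the repo). Tracing the argument wiring above, `forward(input_0, input_1, input_2)` maps to (logits, target, weight), and the kernel computes elementwise `bce_with_logits(logits, target, reduction='none') * weight`:

import torch
import torch.nn.functional as F

crit = WeightedBinaryCrossEntropyLossNew()
logits = torch.randn(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
weight = torch.rand(4, 4, 4, 4, device='cuda')
out = crit(logits, target, weight)
ref = F.binary_cross_entropy_with_logits(logits, target, reduction='none') * weight
assert torch.allclose(out, ref, atol=1e-6)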
LaudateCorpus1/LIGA-Stereo
WeightedBinaryCrossEntropyLoss
false
13,987
[ "Apache-2.0" ]
56
aee3731a24a0ab1667e633e520cc89be2f135272
https://github.com/LaudateCorpus1/LIGA-Stereo/tree/aee3731a24a0ab1667e633e520cc89be2f135272
TVLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_1/inductor_cache/pk/cpk4fxkha5p2i5gkbm5kyfvoyqn3fhzajspzwiol6f6vzv3u6zoc.py
# Topologically Sorted Source Nodes: [sub, pow_1, h_tv, sub_1, pow_2, w_tv, add], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add]
# Source node to ATen node mapping:
#   add => add
#   h_tv => sum_1
#   pow_1 => pow_1
#   pow_2 => pow_2
#   sub => sub
#   sub_1 => sub_1
#   w_tv => sum_2
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_add_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 192
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = rindex < rnumel
    r0 = rindex % 12
    r1 = (rindex // 12)
    r2 = rindex % 3
    r3 = (rindex // 3)
    tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
    tmp8 = tl.load(in_ptr0 + (1 + r2 + (4*r3)), rmask, other=0.0)
    tmp9 = tl.load(in_ptr0 + (r2 + (4*r3)), rmask, other=0.0)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(rmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp10 = tmp8 - tmp9
    tmp11 = tmp10 * tmp10
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.where(rmask, tmp12, 0)
    tmp15 = tl.sum(tmp14, 1)[:, None]
    tmp16 = tmp7 + tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [sub, pow_1, h_tv, sub_1, pow_2, w_tv, add], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_pow_sub_sum_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0)
        del arg0_1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    rnumel = 192
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r0 = rindex % 12
    r1 = rindex // 12
    r2 = rindex % 3
    r3 = rindex // 3
    tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
    tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
    tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(rmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp10 = tmp8 - tmp9
    tmp11 = tmp10 * tmp10
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.where(rmask, tmp12, 0)
    tmp15 = tl.sum(tmp14, 1)[:, None]
    tmp16 = tmp7 + tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf2,


class TVLossNew(nn.Module):

    def __init__(self):
        super(TVLossNew, self).__init__()

    def _tensor_size(self, t):
        return t.size()[1] * t.size()[2] * t.size()[3]

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
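A reference check (my addition, not from the repo): the single persistent reduction equals the unnormalized squared total-variation term over height and width; note that this compiled trace applies no weighting or size normalization.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
tv = TVLossNew()(x)
ref = ((x[:, :, 1:, :] - x[:, :, :-1, :]) ** 2).sum() + \
      ((x[:, :, :, 1:] - x[:, :, :, :-1]) ** 2).sum()
assert torch.allclose(tv, ref, atol=1e-5)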
GuoShi28/GCP-Net
TVLoss
false
8,160
[ "Apache-2.0" ]
24
cef7513fa242343055af64e612429e4384d3c1d7
https://github.com/GuoShi28/GCP-Net/tree/cef7513fa242343055af64e612429e4384d3c1d7
ConvReg
import torch
import torch.nn as nn


class ConvReg(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.t_conv1 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.t_conv2 = nn.ConvTranspose2d(64, 3, 2, stride=2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.t_conv1(x)
        x = self.relu(x)
        x = self.t_conv2(x)
        x = self.sigmoid(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 192
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 192
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_sigmoid_7(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y0 = yindex % 3
    y1 = yindex // 3
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12288 * y1), ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (128, 64, 2, 2), (256, 4, 2, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (64, 3, 2, 2), (12, 4, 2, 1))
    assert_size_stride(primals_9, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
        triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
        triton_poi_fused_2[grid(8192, 9)](primals_4, buf2, 8192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64), torch.float32)
        triton_poi_fused_3[grid(8192, 4)](primals_6, buf3, 8192, 4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((64, 3, 2, 2), (12, 1, 6, 3), torch.float32)
        triton_poi_fused_4[grid(192, 4)](primals_8, buf4, 192, 4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
        del primals_8
        buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 64, 32, 32), (65536, 1, 2048, 64))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_relu_5[grid(262144)](buf6, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_2
        buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 128, 16, 16), (32768, 1, 2048, 128))
        buf8 = buf7
        del buf7
        triton_poi_fused_convolution_relu_6[grid(131072)](buf8, primals_5, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 64, 32, 32), (65536, 1, 2048, 64))
        buf10 = buf9
        del buf9
        triton_poi_fused_convolution_relu_5[grid(262144)](buf10, primals_7, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_7
        buf11 = extern_kernels.convolution(buf10, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 3, 64, 64), (12288, 1, 192, 3))
        buf12 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
        triton_poi_fused_convolution_sigmoid_7[grid(12, 4096)](buf11, primals_9, buf12, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del buf11
        del primals_9
    return buf12, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, buf12


class ConvRegNew(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.t_conv1 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.t_conv2 = nn.ConvTranspose2d(64, 3, 2, stride=2)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.t_conv1.weight
        primals_7 = self.t_conv1.bias
        primals_8 = self.t_conv2.weight
        primals_9 = self.t_conv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
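A usage sketch (my addition, not from the repo): the network is a small convolutional autoencoder; a 64x64 RGB batch is downsampled twice to a 16x16x128 code, then transposed-convolved back to a sigmoid RGB image of the input size.

import torch

model = ConvRegNew().cuda()
img = torch.rand(4, 3, 64, 64, device='cuda')
recon = model(img)
print(recon.shape)  # torch.Size([4, 3, 64, 64])
assert 0.0 <= recon.min().item() and recon.max().item() <= 1.0  # sigmoid output range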
Guru-Uni-siegen/Domain-Shifting-Network
ConvReg
false
11,475
[ "MIT" ]
0
dd9eb7bda07634874497a335151b5e967aaad874
https://github.com/Guru-Uni-siegen/Domain-Shifting-Network/tree/dd9eb7bda07634874497a335151b5e967aaad874
PCK
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/r5/cr56rlcvtfyi52hk4c74nr7txue2zp6d35puxsdjrhq3rioeyj4k.py # Topologically Sorted Source Nodes: [sub, norm, lt, dist_2d], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.lt, aten._to_copy] # Source node to ATen node mapping: # dist_2d => convert_element_type # lt => lt # norm => pow_1, pow_2, sum_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%pow_2, 150), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.float64), kwargs = {}) triton_poi_fused__to_copy_linalg_vector_norm_lt_sub_0 = async_compile.triton('triton_poi_fused__to_copy_linalg_vector_norm_lt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_linalg_vector_norm_lt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_linalg_vector_norm_lt_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = 150.0 tmp21 = tmp19 < tmp20 tmp22 = tmp21.to(tl.float64) tl.store(out_ptr0 + (x0), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float64) # Topologically Sorted Source Nodes: [sub, norm, lt, dist_2d], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.lt, aten._to_copy] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_linalg_vector_norm_lt_sub_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_linalg_vector_norm_lt_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = 150.0 tmp21 = tmp19 < tmp20 tmp22 = tmp21.to(tl.float64) tl.store(out_ptr0 + x0, tmp22, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float64) get_raw_stream(0) triton_poi_fused__to_copy_linalg_vector_norm_lt_sub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class BaseMetric(nn.Module): def forward(self, y_pr, points_gt, gt_mask=None): """ Base forward method for metric evaluation Args: y_pr: 3D prediction of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) points_gt: 3D gt of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) gt_mask: boolean mask, tensor of shape (BATCH_SIZExN_JOINTS). Applied to results, if provided Returns: Metric as single value, if reduction is given, or as a tensor of values """ pass class PCKNew(BaseMetric): """ Percentage of correct keypoints according to a threshold value. Usually the default threshold is 150mm """ def __init__(self, reduction=None, threshold=150, **kwargs): super().__init__(**kwargs) self.thr = threshold self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
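For readers who want the semantics of the fused kernel without Triton, the sketch below is an eager-mode reference (the function name pck_reference is introduced here for illustration and is not from the repository). It restates the graph above — subtract, L2 norm over the last axis, compare against the 150 threshold, cast to float64 — and runs on CPU or GPU.

import torch

# Eager reference for the fused sub/norm/lt/to-copy kernel above.
# pck_reference is a hypothetical name used only for this sketch.
def pck_reference(y_pr, points_gt, threshold=150.0):
    dist = torch.norm(y_pr - points_gt, dim=-1)   # per-joint L2 distance
    return (dist < threshold).double()            # 1.0 where "correct"

y_pr = torch.rand(4, 4, 4, 4)
points_gt = torch.rand(4, 4, 4, 4)
out = pck_reference(y_pr, points_gt)              # shape (4, 4, 4), float64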
miracleyoo/lifting_events_to_3d_hpe
PCK
false
10,600
[ "Apache-2.0" ]
0
dfe734ee055900d6ab90c064bf82db7672830ac7
https://github.com/miracleyoo/lifting_events_to_3d_hpe/tree/dfe734ee055900d6ab90c064bf82db7672830ac7
Net
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim import torch.autograd class Net(nn.Module): def __init__(self, STATE_NUM, ACTION_NUM): super(Net, self).__init__() self.fc1 = nn.Linear(in_features=STATE_NUM, out_features=128) self.fc2 = nn.Linear(in_features=128, out_features=ACTION_NUM) def forward(self, x): x = self.fc1(x) x = F.relu(x) action_value = self.fc2(x) return action_value def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'STATE_NUM': 4, 'ACTION_NUM': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf3, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3 class NetNew(nn.Module): def __init__(self, STATE_NUM, ACTION_NUM): super(NetNew, self).__init__() self.fc1 = nn.Linear(in_features=STATE_NUM, out_features=128) self.fc2 = nn.Linear(in_features=128, out_features=ACTION_NUM) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
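A hedged usage sketch for this record. Because the compiled call() is specialized to the tracing input (its guards assert a (4, 4, 4, 4) tensor), the example feeds exactly that shape; the greedy argmax line illustrates how such a Q-network is typically consumed in DQN-style code and is not taken from the repository.

import torch

# NetNew maps states to one Q-value per action (STATE_NUM=4, ACTION_NUM=4,
# matching get_init_inputs()). The input shape is fixed by call()'s guards.
net = NetNew(STATE_NUM=4, ACTION_NUM=4).cuda()
states = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    q_values = net(states)                  # shape (4, 4, 4, 4)
greedy_actions = q_values.argmax(dim=-1)    # index of the highest Q-value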
ChangQingAAS/Deep-Reinforcement-Learning
Net
false
233
[ "MIT" ]
0
3bc1381c632b1730a48e63e972aea62086c4287c
https://github.com/ChangQingAAS/Deep-Reinforcement-Learning/tree/3bc1381c632b1730a48e63e972aea62086c4287c
ScaleNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/yn/cynw6cijkr7y2as6ogxkwx5qt4wbidmg3yocmoqlnbzb4jxtevqv.py # Topologically Sorted Source Nodes: [norm, clamp, n, truediv, x], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.mul, aten.div] # Source node to ATen node mapping: # clamp => clamp_min # n => mul # norm => pow_1, pow_2, sum_1 # truediv => div # x => mul_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-05), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %mul), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (0)) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-05 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tmp0 / tmp16 tmp20 = tmp17 * tmp19 tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, clamp, n, truediv, x], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + 0) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-05 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tmp0 / tmp16 tmp20 = tmp17 * tmp19 tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_0[grid(256)]( primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class ScaleNormNew(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.scale = dim ** -0.5 self.g = nn.Parameter(torch.ones(1)) self.eps = eps def forward(self, input_0): primals_2 = self.g primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
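The constant 0.5 in the kernel is the dim ** -0.5 factor for dim=4, baked in at compile time. The eager-mode sketch below (scale_norm_reference is a hypothetical name, not the repository's) restates what the fused kernel computes: clamp the last-axis L2 norm at eps, scale by dim ** -0.5, divide, and multiply by the learned gain g.

import torch

# Eager reference for the fused clamp/div/linalg_vector_norm/mul kernel above.
# scale_norm_reference is a hypothetical name used only for illustration.
def scale_norm_reference(x, g, dim=4, eps=1e-05):
    n = x.norm(dim=-1, keepdim=True).clamp(min=eps) * dim ** -0.5
    return x / n * g

x = torch.rand(4, 4, 4, 4)
g = torch.ones(1)
out = scale_norm_reference(x, g)   # matches the Triton output up to rounding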
Piki1989/spacetimeformer
ScaleNorm
false
14,190
[ "MIT" ]
209
7e0caf17dd03e5d25e2766c4f7132805779bcc40
https://github.com/Piki1989/spacetimeformer/tree/7e0caf17dd03e5d25e2766c4f7132805779bcc40
SeparableConv2d_same
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/td/ctdv3m5a33kovvtng5iilth4k6mtnyfcota6hhwoiqm34iumu7wi.py # Topologically Sorted Source Nodes: [padded_inputs], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # padded_inputs => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= 
tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [padded_inputs], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) return (buf2, primals_2, primals_3, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) return buf2, primals_2, primals_3, buf0, buf1 def fixed_padding(inputs, kernel_size, rate): kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end)) return padded_inputs class SeparableConv2d_sameNew(nn.Module): def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation= 1, bias=False): super(SeparableConv2d_sameNew, self).__init__() self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation, groups=inplanes, bias=bias) self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.pointwise.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
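fixed_padding pads the input so the depthwise convolution behaves like "same" padding: for kernel_size=3 and rate=1 the effective kernel is 3, the total pad is 2, and one pixel goes on each side, which is exactly what the constant_pad_nd kernel writes. A hypothetical shape check, assuming the class above is in scope and CUDA is available:

import torch

# Hypothetical smoke test (not from the repo): a 1-pixel pad followed by a
# depthwise 3x3 convolution (groups=inplanes) and a 1x1 pointwise convolution
# preserves the spatial size — the "same" behaviour fixed_padding provides.
m = SeparableConv2d_sameNew(inplanes=4, planes=4, kernel_size=3).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    y = m(x)
assert y.shape == x.shape   # padded 4x4 -> 6x6, 3x3 conv -> back to 4x4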
Gummary/Pytorch-Project-Template
SeparableConv2d_same
false
505
[ "MIT" ]
0
56bc5e253627d40fb8771eccdb2bb663c833beb3
https://github.com/Gummary/Pytorch-Project-Template/tree/56bc5e253627d40fb8771eccdb2bb663c833beb3
AdditiveAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/2x/c2x66b6sza3svon43c774fqn45xzpdlajk7fj3gf6dzmp6nxl7jx.py # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 4 x3 
= (xindex // 256) x5 = xindex % 64 x6 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(out_ptr0 + (x6), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/oe/coezvwyo2fyfsqghp3geqsqeeuhyvxcoe7thtujlv6einfmi73d5.py # Topologically Sorted Source Nodes: [mask, invert, setitem], Original ATen: [aten._to_copy, aten.bitwise_not, aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # invert => bitwise_not # mask => convert_element_type # setitem => full_default, index_put # Graph fragment: # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%primals_9, torch.bool), kwargs = {}) # %bitwise_not : [num_users=2] = call_function[target=torch.ops.aten.bitwise_not.default](args = (%convert_element_type,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%squeeze, [%bitwise_not], %full_default), kwargs = {}) triton_poi_fused__to_copy_bitwise_not_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused__to_copy_bitwise_not_index_put_lift_fresh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_bitwise_not_index_put_lift_fresh_1', 'mutated_arg_names': ['in_ptr1', 'out_ptr2'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_bitwise_not_index_put_lift_fresh_1(in_ptr0, in_ptr1, out_ptr0, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp1 = (tmp0 != 0) tmp2 = tmp1 == 0 tmp4 = float("-inf") tmp5 = tl.where(tmp2, tmp4, 
tmp3) tl.store(out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr2 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, ), (1, )) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf0, primals_2, buf1, primals_6, buf2, 1024, grid=grid(1024), stream=stream0) del buf0 del primals_2 del primals_6 buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [mask, invert, setitem], Original ATen: [aten._to_copy, aten.bitwise_not, aten.lift_fresh, aten.index_put] triton_poi_fused__to_copy_bitwise_not_index_put_lift_fresh_1.run(primals_9, buf4, buf5, buf4, 256, grid=grid(256), stream=stream0) del primals_9 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf2, buf5, primals_7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return 
print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 4 x3 = xindex // 256 x5 = xindex % 64 x6 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_poi_fused__to_copy_bitwise_not_index_put_lift_fresh_1(in_ptr0, in_ptr1, out_ptr0, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = tmp0 != 0 tmp2 = tmp1 == 0 tmp4 = float('-inf') tmp5 = tl.where(tmp2, tmp4, tmp3) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr2 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_2, buf1, primals_6, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 del primals_6 buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused__to_copy_bitwise_not_index_put_lift_fresh_1[grid(256) ](primals_9, buf4, buf5, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 
16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), buf2, buf5, primals_7 class AdditiveAttentionNew(nn.Module): """ Originally from: https://arxiv.org/pdf/1409.0473v5.pdf Also referred to as Content Based Attention: https://arxiv.org/pdf/1506.03134v1.pdf Attention is learned for a query vector over a set of vectors. If we have a query vector and a key matrix of size (n, k), we will create an attention vector over the n keys. NOTE! We mask out the attention scores for the positions which it is impossible for the segments to attend to, i.e. the padding. """ def __init__(self, input_dim: 'int'): super().__init__() self.W1 = nn.Linear(input_dim, input_dim) self.W2 = nn.Linear(input_dim, input_dim) self.v = nn.Linear(input_dim, 1) def forward(self, input_0, input_1, input_2): primals_1 = self.W1.weight primals_2 = self.W1.bias primals_5 = self.W2.weight primals_6 = self.W2.bias primals_7 = self.v.weight primals_8 = self.v.bias primals_3 = input_0 primals_4 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
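A hedged call example for this record. The guard shapes in call() require query, key and mask all of shape (4, 4, 4, 4); a zero in the mask marks a position that may not be attended to, and the second kernel overwrites its score with -inf. The variable names below are chosen only for illustration and assume the class above is in scope on a CUDA machine.

import torch

# Hypothetical invocation matching call()'s assert_size_stride guards.
attn = AdditiveAttentionNew(input_dim=4).cuda()
query = torch.rand(4, 4, 4, 4, device='cuda')
key = torch.rand(4, 4, 4, 4, device='cuda')
mask = torch.randint(0, 2, (4, 4, 4, 4), device='cuda').float()
with torch.no_grad():
    scores = attn(query, key, mask)   # (4, 4, 4, 4); masked entries are -inf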
AxlAlm/SegNLP
AdditiveAttention
false
4,881
[ "Apache-2.0" ]
1
89b8d077952397dfcea089376b373b117bcf6a65
https://github.com/AxlAlm/SegNLP/tree/89b8d077952397dfcea089376b373b117bcf6a65
TransposeGatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/vk/cvkcyzml7vrfy2i3zs2tgjrz2g3re65xluovrlmik7fmdvw7f3uo.py # Topologically Sorted Source Nodes: [norm, add, truediv], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add_4 # norm => pow_1, pow_2, sum_1 # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%squeeze, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-12), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%squeeze, %add_4), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_1 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/fu/cfu7hyunx5ghrb4pvgnxxnkvmgplfrecz32xriljo6ebdst3haad.py # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1, sigma, sigma_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.mul, aten.sum] # Source node to ATen node mapping: # add_1 => add_5 # norm_1 => pow_3, pow_4, sum_2 # sigma => mul_4 # sigma_1 => sum_3 # truediv_1 => div_1 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%squeeze_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_4, 1e-12), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%squeeze_1, %add_5), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %squeeze_1), kwargs = {}) # %sum_3 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul_4,), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mul_sum_2 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mul_sum_2(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8 * tmp0 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 
1)[:, None] tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2r/c2rc442g4q5j7ue6cvpvh3io7ghkfdrmn76o244doamnt7unobsn.py # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] # Source node to ATen node mapping: # truediv_2 => div_2 # Graph fragment: # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_4, %expand), kwargs = {}) triton_poi_fused_div_3 = async_compile.triton('triton_poi_fused_div_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/dg/cdgmuvildyld52vxzyu4vek5xyxllfoowazacbtlvri2cbtd4afh.py # Topologically Sorted Source Nodes: [conv, conv_1, mask, gated_mask, x_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # conv => convolution # conv_1 => gt, mul_5, where # gated_mask => sigmoid # mask => convolution_1 # x_2 => mul_7 # Graph fragment: # %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %div_2, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul_5), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %div_5, %primals_9, [1, 1], [0, 0], 
[1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %sigmoid), kwargs = {}) triton_poi_fused_convolution_leaky_relu_mul_sigmoid_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_mul_sigmoid_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_mul_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_mul_sigmoid_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 25) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 0.2 tmp9 = tmp2 * tmp8 tmp10 = tl.where(tmp7, tmp2, tmp9) tmp11 = tl.sigmoid(tmp5) tmp12 = tmp10 * tmp11 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(in_out_ptr1 + (x3), tmp5, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], 
Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (1, 64), 0), reinterpret_tensor(primals_2, (4, 1), (1, 1), 0), out=buf1) buf3 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm, add, truediv], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_1.run(buf1, buf3, 1, 64, grid=grid(1), stream=stream0) buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (4, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 1), (1, 0), 0), out=buf4) buf6 = empty_strided_cuda((), (), torch.float32) buf20 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1, sigma, sigma_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.mul, aten.sum] triton_per_fused_add_div_linalg_vector_norm_mul_sum_2.run(buf4, buf6, buf20, 1, 4, grid=grid(1), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] triton_poi_fused_div_3.run(primals_4, buf6, buf7, 256, grid=grid(256), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf0, buf7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 5, 5), (100, 25, 5, 1)) buf10 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [mm_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_8, (64, 4), (1, 64), 0), reinterpret_tensor(primals_6, (4, 1), (1, 1), 0), out=buf10) buf12 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm_2, add_2, truediv_3], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_1.run(buf10, buf12, 1, 64, grid=grid(1), stream=stream0) buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [mm_4], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_8, (4, 64), (64, 1), 0), reinterpret_tensor(buf12, (64, 1), (1, 0), 0), out=buf13) buf15 = empty_strided_cuda((), (), torch.float32) buf27 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm_3, add_3, truediv_4, sigma_2, sigma_3], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.mul, aten.sum] triton_per_fused_add_div_linalg_vector_norm_mul_sum_2.run(buf13, buf15, buf27, 1, 4, grid=grid(1), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_5], Original ATen: [aten.div] triton_poi_fused_div_3.run(primals_8, buf15, buf16, 256, grid=grid(256), stream=stream0) del primals_8 # Topologically Sorted Source Nodes: [mask], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf0, buf16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf17, (4, 4, 5, 5), (100, 25, 5, 1)) buf9 = buf8; del buf8 # reuse buf18 = buf17; del buf17 # reuse buf19 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [conv, conv_1, mask, gated_mask, x_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.sigmoid, aten.mul] triton_poi_fused_convolution_leaky_relu_mul_sigmoid_4.run(buf9, buf18, primals_5, primals_9, buf19, 400, grid=grid(400), stream=stream0) del primals_5 del primals_9 # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] buf21 = torch.ops.aten.set_.source_Tensor(primals_2, buf20) assert_size_stride(buf21, (4, ), (1, )) del buf1 del primals_2 # Topologically Sorted Source Nodes: [], Original ATen: [] buf24 = torch.ops.aten.set_.source_Tensor(primals_3, buf3) assert_size_stride(buf24, (64, ), (1, )) del buf4 del primals_3 # Topologically Sorted Source Nodes: [norm_3, add_3, truediv_4], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] buf28 = torch.ops.aten.set_.source_Tensor(primals_6, buf27) assert_size_stride(buf28, (4, ), (1, )) del buf10 del primals_6 # Topologically Sorted Source Nodes: [], Original ATen: [] buf31 = torch.ops.aten.set_.source_Tensor(primals_7, buf12) assert_size_stride(buf31, (64, ), (1, )) del buf13 del primals_7 return (buf19, buf7, buf16, buf0, buf6, buf7, buf9, buf15, buf16, buf18, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
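The `triton_poi_fused__unsafe_index_0` kernel above is the scale-factor-2 nearest-neighbour upsample: each output pixel (y, x) reads input pixel (int(y * 0.5), int(x * 0.5)). A minimal PyTorch sketch of that indexing (my illustration, not part of the record; it assumes the 4x4x4x4 input shape asserted in call()):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
up = F.interpolate(x, scale_factor=2, mode='nearest')               # (4, 4, 8, 8)

# same arithmetic as the kernel: tmp1 * 0.5 then truncation to int
idx = (torch.arange(8, dtype=torch.float32) * 0.5).to(torch.int64)  # [0,0,1,1,2,2,3,3]
manual = x[:, :, idx, :][:, :, :, idx]                              # gather rows, then columns
assert torch.equal(up, manual)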
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mul_sum_2(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = tmp8 * tmp0 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None) @triton.jit def triton_poi_fused_div_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_mul_sigmoid_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 25 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 0.2 tmp9 = tmp2 * tmp8 tmp10 = tl.where(tmp7, tmp2, tmp9) tmp11 = tl.sigmoid(tmp5) tmp12 = tmp10 * tmp11 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(in_out_ptr1 + x3, tmp5, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (1, 64), 0 ), reinterpret_tensor(primals_2, (4, 1), (1, 1), 0), out=buf1) buf3 = empty_strided_cuda((64,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_1[grid(1)](buf1, buf3, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (4, 64), (64, 1), 0 ), reinterpret_tensor(buf3, (64, 1), (1, 0), 0), out=buf4) buf6 = empty_strided_cuda((), (), torch.float32) buf20 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mul_sum_2[grid(1)](buf4, buf6, buf20, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_3[grid(256)](primals_4, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf8 = extern_kernels.convolution(buf0, buf7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 5, 5), (100, 25, 5, 1)) buf10 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_8, (64, 4), (1, 64), 0 ), reinterpret_tensor(primals_6, (4, 1), (1, 1), 0), out=buf10) buf12 = empty_strided_cuda((64,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_1[grid(1)](buf10, buf12, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(primals_8, (4, 64), (64, 1), 0 ), reinterpret_tensor(buf12, (64, 1), (1, 0), 0), out=buf13) buf15 = empty_strided_cuda((), (), torch.float32) buf27 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mul_sum_2[grid(1)](buf13, buf15, buf27, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_3[grid(256)](primals_8, buf15, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf17 = extern_kernels.convolution(buf0, buf16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 4, 5, 5), (100, 25, 5, 1)) buf9 = buf8 del buf8 buf18 = buf17 del buf17 buf19 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32 ) triton_poi_fused_convolution_leaky_relu_mul_sigmoid_4[grid(400)](buf9, buf18, primals_5, primals_9, buf19, 400, XBLOCK=128, num_warps= 4, num_stages=1) del primals_5 del primals_9 buf21 = torch.ops.aten.set_.source_Tensor(primals_2, buf20) assert_size_stride(buf21, (4,), (1,)) del buf1 del primals_2 buf24 = torch.ops.aten.set_.source_Tensor(primals_3, buf3) assert_size_stride(buf24, (64,), (1,)) del buf4 del primals_3 buf28 = torch.ops.aten.set_.source_Tensor(primals_6, buf27) assert_size_stride(buf28, (4,), (1,)) del buf10 del primals_6 buf31 = torch.ops.aten.set_.source_Tensor(primals_7, buf12) assert_size_stride(buf31, (64,), (1,)) del buf13 del primals_7 return buf19, buf7, buf16, buf0, buf6, buf7, buf9, buf15, buf16, buf18 def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mm(torch.t(w.view(height, -1).data), u.data.view(-1, 1)).squeeze(1)) u.data = l2normalize(torch.mm(w.view(height, -1).data, v.data. 
view(-1, 1)).squeeze(1)) sigma = u.data * w.view(height, -1).data.mm(v.data.view(-1, 1) ).squeeze(1) sigma = sigma.sum() setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class GatedConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, activation='elu', sn=False): super(GatedConv2d, self).__init__() self.pad = nn.ZeroPad2d(padding) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'elu': self.activation = nn.ELU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) self.mask_conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation= dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) self.mask_conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) self.sigmoid = torch.nn.Sigmoid() def forward(self, x): x = self.pad(x) conv = self.conv2d(x) if self.activation: conv = self.activation(conv) mask = self.mask_conv2d(x) gated_mask = self.sigmoid(mask) x = conv * gated_mask return x class TransposeGatedConv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, activation='lrelu', sn=True, scale_factor=2): super(TransposeGatedConv2dNew, self).__init__() self.scale_factor = scale_factor self.gated_conv2d = GatedConv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, activation, sn) def forward(self, input_0): primals_2 = self.gated_conv2d.conv2d.module.bias primals_5 = self.gated_conv2d.conv2d.module.weight_u primals_3 = self.gated_conv2d.conv2d.module.weight_v primals_1 = self.gated_conv2d.conv2d.module.weight_bar primals_6 = self.gated_conv2d.mask_conv2d.module.bias primals_9 = self.gated_conv2d.mask_conv2d.module.weight_u primals_7 = self.gated_conv2d.mask_conv2d.module.weight_v primals_4 = self.gated_conv2d.mask_conv2d.module.weight_bar primals_8 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
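The SpectralNorm module above estimates the largest singular value of weight_bar.view(height, -1) by power iteration; sigma = u · (W v) is what the fused linalg_vector_norm/mul/sum kernels compute per forward. A minimal numerical sketch (my addition, not from the repo; it runs many iterations rather than the module's single one per forward, so convergence to the true top singular value is a stated assumption):

import torch

def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)

torch.manual_seed(0)
w = torch.randn(4, 64)                  # same (height, width) as weight_bar.view(4, -1) here
u = l2normalize(torch.randn(4))
for _ in range(100):                    # the module performs one iteration per forward
    v = l2normalize(w.t() @ u)
    u = l2normalize(w @ v)
sigma = torch.dot(u, w @ v)             # matches sigma = (u * (w @ v)).sum() in _update_u_v
assert torch.allclose(sigma, torch.linalg.svdvals(w)[0], rtol=1e-3)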
delldu/DeepFillv2
TransposeGatedConv2d
false
6,565
[ "MIT" ]
1
a564b9589c1b42bcdddd3d7601f4059c4594a439
https://github.com/delldu/DeepFillv2/tree/a564b9589c1b42bcdddd3d7601f4059c4594a439
ImageLinearAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/fb/cfbyh7temr2u5cam5txbekh4n77qsqs7bwxugnr6jx7oljt7yy43.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/ui/cuin67jltyp2pqmjyuj7jk5xm32mnpns6lnsj6m7rj5penfpunzh.py # Topologically Sorted Source Nodes: [k_3], Original ATen: [aten._softmax] # Source node to ATen node mapping: # k_3 => exp, sum_1 # Graph fragment: # %mul_tensor_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_2, [2], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_2, %amax_default_1), kwargs = {}) # %mul_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor_1, 0.3535533905932738), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_3,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[512, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 512 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + (r2 + (64*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r2 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, float("-inf")) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = 0.3535533905932738 tmp11 = tmp9 * tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tl.store(out_ptr0 + (x3), tmp8, xmask) 
tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/e4/ce4rwceimtzgrkn6hryj7x2suuhmh2y5y7azzuzwme6dg6vorcfr.py # Topologically Sorted Source Nodes: [k_3], Original ATen: [aten._softmax] # Source node to ATen node mapping: # k_3 => div, exp # Graph fragment: # %mul_tensor_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_2, %amax_default_1), kwargs = {}) # %mul_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor_1, 0.3535533905932738), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_3,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 1024], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 16 x3 = (xindex // 16) y0 = yindex % 8 y1 = (yindex // 8) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + (64*y0) + (512*x2) + (8192*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3 + (64*y0)), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (y0 + (8*x2) + (128*y1)), xmask & ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (y0 + (8*x2) + (128*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = 0.3535533905932738 tmp8 = tmp6 * tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(out_ptr0 + (y0 + (8*x4) + 
(8192*y1)), tmp11, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/fl/cflyoqdqyoifdgzjzagxzvnxdpl2rmwlevx4kd65xgj7pgguu52i.py # Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution] # Source node to ATen node mapping: # v => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = (yindex // 512) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (8192*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xk/cxka5dpwif2jhpgq2o4djeic2tvlswcqq5ylysqhjtvdnur44oen.py # Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm] # Source node to ATen node mapping: # context => bmm # Graph fragment: # %bmm : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%view_3, %view_4), kwargs = {}) triton_poi_fused_bmm_4 = async_compile.triton('triton_poi_fused_bmm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((8*x1) + (8192*(x0 // 8)) + (x0 % 8)), None) tl.store(out_ptr0 + (x2), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/h7/ch7mni2ceg3yfpa65pszw6dduzxpoxwcqlucmwkevlvhaokh4nh4.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # out_1 => clone, view_11 # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_10,), kwargs = {memory_format: torch.contiguous_format}) # %view_11 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [4, 512, 4, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_5 = async_compile.triton('triton_poi_fused__unsafe_view_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused__unsafe_view_clone_5(in_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 64 y1 = (yindex // 64) y5 = yindex y3 = yindex % 512 y4 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (y0 + (64*x2) + (1024*y1)), xmask, eviction_policy='evict_last') tl.store(out_ptr1 + (y3 + (512*x2) + (8192*y4)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ak/cakqdtdgjrrn2wn6ok7glhc4cgech4lq2xoi2fg23uzsycnfeerj.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_2 => convolution_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_11, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), 
(64, 16, 4, 1)) assert_size_stride(primals_2, (512, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (512, ), (1, )) assert_size_stride(primals_4, (512, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (512, ), (1, )) assert_size_stride(primals_6, (512, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (512, ), (1, )) assert_size_stride(primals_8, (4, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 512, 4, 4), (8192, 1, 2048, 512)) # Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 512, 4, 4), (8192, 1, 2048, 512)) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf0, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf4 = empty_strided_cuda((4, 8, 1, 16), (128, 1, 512, 8), torch.float32) buf5 = empty_strided_cuda((4, 8, 1, 16), (128, 1, 512, 8), torch.float32) # Topologically Sorted Source Nodes: [k_3], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf2, primals_5, buf4, buf5, 512, 64, grid=grid(512), stream=stream0) buf6 = empty_strided_cuda((4, 8, 64, 16), (8192, 1, 128, 8), torch.float32) # Topologically Sorted Source Nodes: [k_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, primals_5, buf4, buf5, buf6, 32, 1024, grid=grid(32, 1024), stream=stream0) del primals_5 buf7 = buf5; del buf5 # reuse buf8 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [q_3], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf1, primals_3, buf7, buf8, 512, 64, grid=grid(512), stream=stream0) buf9 = reinterpret_tensor(buf2, (4, 8, 64, 16), (8192, 1, 128, 8), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [q_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf1, primals_3, buf7, buf8, buf9, 32, 1024, grid=grid(32, 1024), stream=stream0) del buf7 del buf8 del primals_3 buf10 = reinterpret_tensor(buf1, (4, 512, 4, 4), (8192, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution] triton_poi_fused_convolution_3.run(buf3, primals_7, buf10, 2048, 16, grid=grid(2048, 16), stream=stream0) del primals_7 buf11 = reinterpret_tensor(buf3, (32, 64, 16), (1, 512, 32), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm] triton_poi_fused_bmm_4.run(buf6, buf11, 32768, grid=grid(32768), stream=stream0) buf12 = empty_strided_cuda((32, 64, 64), (4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm] extern_kernels.bmm(buf11, reinterpret_tensor(buf10, (32, 16, 64), (1024, 1, 16), 0), out=buf12) 
buf13 = reinterpret_tensor(buf11, (32, 16, 64), (1, 32, 512), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] triton_poi_fused_bmm_4.run(buf9, buf13, 32768, grid=grid(32768), stream=stream0) buf14 = empty_strided_cuda((32, 16, 64), (1024, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(buf13, buf12, out=buf14) buf16 = reinterpret_tensor(buf13, (4, 512, 4, 4), (8192, 1, 2048, 512), 0); del buf13 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.clone, aten._unsafe_view] triton_poi_fused__unsafe_view_clone_5.run(buf14, buf16, 2048, 16, grid=grid(2048, 16), stream=stream0) del buf14 # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 4, 4, 4), (64, 1, 16, 4)) buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_6.run(buf17, primals_9, buf18, 16, 16, grid=grid(16, 16), stream=stream0) del buf17 del primals_9 return (buf18, buf0, primals_2, primals_4, primals_6, primals_8, buf6, buf9, buf16, reinterpret_tensor(buf12, (32, 64, 64), (4096, 1, 64), 0), reinterpret_tensor(buf10, (32, 64, 16), (1024, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((512, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((512, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((512, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
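The two bmm calls above implement the factorised (linear) attention product: a softmaxed (d x n) key map times the values gives a d x d context, which the softmaxed queries then read, so the n x n attention matrix is never formed. The constant 0.3535533905932738 is 64 ** -0.25, i.e. key_dim ** -0.25 applied to both q and k. A minimal single-head sketch (my illustration; the softmax axes follow the usual efficient-attention convention and are an assumption, since the fused kernels bake the exact tensor layout into their index arithmetic):

import torch

n, d = 16, 64                           # h*w = 16 positions, key_dim = 64, per head
q = torch.randn(n, d)
k = torch.randn(n, d)
v = torch.randn(n, d)

scale = 64 ** -0.25                     # 0.3535533905932738, the constant in the kernels
q = torch.softmax(q * scale, dim=-1)    # normalise queries over features
k = torch.softmax(k * scale, dim=0)     # normalise keys over positions
context = k.t() @ v                     # (d, d): the role of the first bmm
out = q @ context                       # (n, d): the role of the second bmm
assert out.shape == (n, d)              # cost is O(n * d^2), not O(n^2 * d)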
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data import torch.onnx.operators import torch.optim import torch.optim.lr_scheduler assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 512 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + (r2 + 64 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r2 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, float('-inf')) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = 0.3535533905932738 tmp11 = tmp9 * tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tl.store(out_ptr0 + x3, tmp8, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 32 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 16 x3 = xindex // 16 y0 = yindex % 8 y1 = yindex // 8 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 64 * y0 + 512 * x2 + 8192 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3 + 64 * y0), xmask & ymask, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr2 + (y0 + 8 * x2 + 128 * y1), xmask & ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (y0 + 8 * x2 + 128 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = 0.3535533905932738 tmp8 = tmp6 * tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(out_ptr0 + (y0 + 8 * x4 + 8192 * y1), tmp11, xmask & ymask) @triton.jit def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] 
tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 8192 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask) @triton.jit def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 * x1 + 8192 * (x0 // 8) + x0 % 8), None) tl.store(out_ptr0 + x2, tmp0, None) @triton.jit def triton_poi_fused__unsafe_view_clone_5(in_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 64 y1 = yindex // 64 y3 = yindex % 512 y4 = yindex // 512 tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 1024 * y1), xmask, eviction_policy='evict_last') tl.store(out_ptr1 + (y3 + 512 * x2 + 8192 * y4), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (512, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (512, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (512, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (4, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 16)](primals_1, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf3 = extern_kernels.convolution(buf0, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf3, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf4 = empty_strided_cuda((4, 8, 1, 16), (128, 1, 512, 8), torch. float32) buf5 = empty_strided_cuda((4, 8, 1, 16), (128, 1, 512, 8), torch. float32) triton_per_fused__softmax_1[grid(512)](buf2, primals_5, buf4, buf5, 512, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 8, 64, 16), (8192, 1, 128, 8), torch. float32) triton_poi_fused__softmax_2[grid(32, 1024)](buf2, primals_5, buf4, buf5, buf6, 32, 1024, XBLOCK=256, YBLOCK=1, num_warps=4, num_stages=1) del primals_5 buf7 = buf5 del buf5 buf8 = buf4 del buf4 triton_per_fused__softmax_1[grid(512)](buf1, primals_3, buf7, buf8, 512, 64, XBLOCK=8, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (4, 8, 64, 16), (8192, 1, 128, 8), 0) del buf2 triton_poi_fused__softmax_2[grid(32, 1024)](buf1, primals_3, buf7, buf8, buf9, 32, 1024, XBLOCK=256, YBLOCK=1, num_warps=4, num_stages=1) del buf7 del buf8 del primals_3 buf10 = reinterpret_tensor(buf1, (4, 512, 4, 4), (8192, 16, 4, 1), 0) del buf1 triton_poi_fused_convolution_3[grid(2048, 16)](buf3, primals_7, buf10, 2048, 16, XBLOCK=1, YBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf3, (32, 64, 16), (1, 512, 32), 0) del buf3 triton_poi_fused_bmm_4[grid(32768)](buf6, buf11, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((32, 64, 64), (4096, 64, 1), torch.float32) extern_kernels.bmm(buf11, reinterpret_tensor(buf10, (32, 16, 64), ( 1024, 1, 16), 0), out=buf12) buf13 = reinterpret_tensor(buf11, (32, 16, 64), (1, 32, 512), 0) del buf11 triton_poi_fused_bmm_4[grid(32768)](buf9, buf13, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((32, 16, 64), (1024, 64, 1), torch.float32) extern_kernels.bmm(buf13, buf12, out=buf14) buf16 = reinterpret_tensor(buf13, (4, 512, 4, 4), (8192, 1, 2048, 512), 0) del buf13 triton_poi_fused__unsafe_view_clone_5[grid(2048, 16)](buf14, buf16, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del buf14 buf17 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 4, 4, 4), (64, 1, 16, 4)) buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_6[grid(16, 16)](buf17, primals_9, buf18, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf17 del primals_9 return (buf18, buf0, primals_2, primals_4, primals_6, primals_8, buf6, buf9, buf16, reinterpret_tensor(buf12, (32, 64, 64), (4096, 1, 64), 0), reinterpret_tensor(buf10, (32, 64, 16), (1024, 16, 1), 0)) class ImageLinearAttentionNew(nn.Module): def __init__(self, chan, chan_out=None, kernel_size=1, padding=0, stride=1, key_dim=64, value_dim=64, heads=8): super().__init__() self.chan = chan chan_out = chan if chan_out is None else chan_out self.key_dim = key_dim self.value_dim = value_dim self.heads = heads conv_kwargs = {'padding': padding, 'stride': stride} self.to_q = nn.Conv2d(chan, key_dim * heads, kernel_size, **conv_kwargs ) self.to_k = nn.Conv2d(chan, key_dim * heads, kernel_size, **conv_kwargs ) self.to_v = nn.Conv2d(chan, value_dim * heads, kernel_size, ** conv_kwargs) out_conv_kwargs = {'padding': padding} self.to_out = nn.Conv2d(value_dim * heads, chan_out, kernel_size, **out_conv_kwargs) def forward(self, input_0): primals_2 = self.to_q.weight primals_3 = self.to_q.bias primals_4 = self.to_k.weight primals_5 = self.to_k.bias primals_6 = self.to_v.weight 
primals_7 = self.to_v.bias primals_8 = self.to_out.weight primals_9 = self.to_out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
CUMLSec/stateformer
ImageLinearAttention
false
7,916
[ "MIT" ]
41
87cb3c906c43fcff42b2ca820eb6e7fd918d0a1c
https://github.com/CUMLSec/stateformer/tree/87cb3c906c43fcff42b2ca820eb6e7fd918d0a1c
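A minimal sketch (not part of the record) of how this pair of modules could be checked for numerical agreement, assuming the eager `ImageLinearAttention` class from the record's original field is in scope and a CUDA device is available; the input shape (4, 4, 4, 4) is taken from the `assert_size_stride` guard on `primals_1`, and the tolerance is a guess, since fused kernels reorder floating-point reductions:

import torch

def check_image_linear_attention():
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = ImageLinearAttention(chan=4).cuda().eval()
    opt = ImageLinearAttentionNew(chan=4).cuda().eval()
    # Both classes declare identical to_q/to_k/to_v/to_out submodules,
    # so the state dicts line up key-for-key.
    opt.load_state_dict(ref.state_dict())
    with torch.no_grad():
        y_ref = ref(x)
        y_opt = opt(x)
    assert torch.allclose(y_ref, y_opt, atol=1e-4, rtol=1e-4)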
CAModule
import torch import torch.nn as nn class CAModule(nn.Module): """ Re-implementation of Squeeze-and-Excitation (SE) block described in: *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* code reference: https://github.com/kobiso/CBAM-keras/blob/master/models/attention_module.py """ def __init__(self, num_channels, reduc_ratio=2): super(CAModule, self).__init__() self.num_channels = num_channels self.reduc_ratio = reduc_ratio self.fc1 = nn.Linear(num_channels, num_channels // reduc_ratio, bias=True) self.fc2 = nn.Linear(num_channels // reduc_ratio, num_channels, bias=True) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, feat_map): gap_out = feat_map.view(feat_map.size()[0], self.num_channels, -1 ).mean(dim=2) fc1_out = self.relu(self.fc1(gap_out)) fc2_out = self.sigmoid(self.fc2(fc1_out)) fc2_out = fc2_out.view(fc2_out.size()[0], fc2_out.size()[1], 1, 1) feat_map = torch.mul(feat_map, fc2_out) return feat_map def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_2, (4, 2), (1, 4 ), 0), out=buf2) del primals_2 buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(8)](buf3, primals_3, 8, XBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf5, primals_1, buf1, buf3, buf4, primals_4 class CAModuleNew(nn.Module): """ Re-implementation of Squeeze-and-Excitation (SE) block described in: *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* code reference: https://github.com/kobiso/CBAM-keras/blob/master/models/attention_module.py """ def 
__init__(self, num_channels, reduc_ratio=2): super(CAModuleNew, self).__init__() self.num_channels = num_channels self.reduc_ratio = reduc_ratio self.fc1 = nn.Linear(num_channels, num_channels // reduc_ratio, bias=True) self.fc2 = nn.Linear(num_channels // reduc_ratio, num_channels, bias=True) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
C3-ASV-Team/torchxrayvision
CAModule
false
4,929
[ "Apache-2.0" ]
1
7e53f0606986562f17a1ffd9f31d006756eff78d
https://github.com/C3-ASV-Team/torchxrayvision/tree/7e53f0606986562f17a1ffd9f31d006756eff78d
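For reference, a hedged eager-mode restatement (not taken from the record) of the squeeze-and-excitation pipeline that `CAModule` and the fused kernels above both implement; the helper name and its callable arguments are illustrative only:

import torch

def se_reference(feat_map, fc1, fc2):
    n, c = feat_map.shape[:2]
    gap = feat_map.view(n, c, -1).mean(dim=2)            # squeeze: global average pool
    hidden = torch.relu(fc1(gap))                        # excitation, reduced to c // reduc_ratio
    scale = torch.sigmoid(fc2(hidden)).view(n, c, 1, 1)  # per-channel gate in (0, 1)
    return feat_map * scale                              # reweight the input feature map

In the compiled graph these four steps map onto `triton_per_fused_mean_0` (the pooled mean), an `extern_kernels.mm` plus `triton_poi_fused_relu_1` (fc1 with bias and ReLU), `extern_kernels.addmm` (fc2), and `triton_poi_fused_mul_2`, which fuses the sigmoid with the broadcast multiply.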
ScaledDotProductAttention
import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, q, k, v): attn = torch.bmm(q, k.transpose(1, 2)) attn = attn / self.temperature log_attn = F.log_softmax(attn, 2) attn = self.softmax(attn) attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn, log_attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'temperature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf4, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf4 return buf3, buf2, buf5 class ScaledDotProductAttentionNew(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1], output[2]
RuaBQ/FEAT
ScaledDotProductAttention
false
2,823
[ "MIT" ]
0
e46f56b03f8ef820d549cb385600a12bdf224de9
https://github.com/RuaBQ/FEAT/tree/e46f56b03f8ef820d549cb385600a12bdf224de9
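A small CPU-only sketch (not part of the record) of the numerically stable form that `triton_poi_fused__softmax_0` uses: the row maximum of the raw scores is subtracted before the division by the temperature (`tmp14 = x - max`, then `tmp16 = tmp14 * 0.25`), which leaves the softmax unchanged because it is invariant to a constant shift:

import torch

x = torch.randn(4, 4, 4)
T = 4.0  # the record's temperature; 1 / T = 0.25 matches tmp15 in the kernel
naive = torch.softmax(x / T, dim=2)
shifted = torch.exp((x - x.max(dim=2, keepdim=True).values) / T)
stable = shifted / shifted.sum(dim=2, keepdim=True)
assert torch.allclose(naive, stable, atol=1e-6)

The same shifted values (`tmp16`, written to buf4) are reused by `triton_poi_fused__log_softmax_2`, so the log-softmax output shares the stabilisation work.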
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/tj/ctjenrmpmnfu2d3pk5t7qu4ih2v4kysuba4mugrps3thgibhpgig.py # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul_2, sub_2, mul_3, add_1, pred_sigmoid, sub, mul, sub_1, mul_1, pt, pow_1, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.mean] # Source node to ATen node mapping: # add_1 => add_1 # binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul_5, neg, sub_3, sub_4, sub_5 # focal_weight => mul_4 # loss => mul_6 # loss_1 => mean # loss_cls => mul_7 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # pow_1 => pow_1 # pred_sigmoid => sigmoid # pt => add # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, %sub_4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_3 : 
[num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.75), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %pow_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %mul_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_6,), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, 
tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 0.25 tmp14 = tmp0 * tmp13 tmp15 = 0.75 tmp16 = tmp2 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tl.sigmoid(tmp3) tmp19 = tmp1 - tmp18 tmp20 = tmp19 * tmp0 tmp21 = tmp18 * tmp2 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp17 * tmp23 tmp25 = tmp12 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = 256.0 tmp30 = tmp28 / tmp29 tmp31 = tmp30 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp31, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul_2, sub_2, mul_3, add_1, pred_sigmoid, sub, mul, sub_1, mul_1, pt, pow_1, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import Tensor import torch.nn as nn from typing import Optional from typing import Union import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 0.25 tmp14 = tmp0 * tmp13 tmp15 = 0.75 tmp16 = tmp2 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tl.sigmoid(tmp3) tmp19 = tmp1 - tmp18 tmp20 = tmp19 * tmp0 tmp21 = tmp18 * tmp2 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp17 * tmp23 tmp25 = tmp12 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = 256.0 tmp30 = tmp28 / tmp29 tmp31 = tmp30 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[ grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def sigmoid_focal_loss(pred: 'Tensor', target: 'Tensor', weight: 'Optional[Tensor]'=None, gamma: 'float'=2.0, alpha: 'Union[float, Tensor]'=0.25, reduction: 'str'='mean', avg_factor: 'Optional[float]'=None) ->Tensor: """Sigmoid focal loss. 

    Args:
        pred: The prediction with shape (N, \\*).
        target: The ground truth label of the prediction with
            shape (N, \\*).
        weight: Sample-wise loss weight with shape (N, ). Defaults to None.
        gamma: The gamma for calculating the modulating factor. Defaults to
            2.0.
        alpha: A balanced form for Focal Loss. If it is a float, then a
            global balanced form is applied. If it is a Tensor with shape
            (N, \\*) or any shape broadcast-compatible with ``pred``, it is
            applied element-wise.
        reduction: The method used to reduce the loss.
            Options are "none", "mean" and "sum". If reduction is 'none',
            the loss has the same shape as pred and label. Defaults to 'mean'.
        avg_factor: Average factor that is used to average
            the loss. Defaults to None.

    Returns:
        Loss.
    """
    assert pred.shape == target.shape, 'pred and target should be in the same shape.'
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none') * focal_weight
    if weight is not None:
        assert weight.dim() == 1
        weight = weight.float()
        if pred.dim() > 1:
            weight = weight.reshape(-1, 1)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


class FocalLossNew(nn.Module):
    """Focal loss.

    Args:
        gamma (float): Focusing parameter in focal loss. Defaults to 2.0.
        alpha (float): The parameter in balanced form of focal loss.
            Defaults to 0.25.
        reduction (str): The method used to reduce the loss into
            a scalar. Options are "none" and "mean". Defaults to 'mean'.
        loss_weight (float): Weight of loss. Defaults to 1.0.
    """

    def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0):
        super(FocalLossNew, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
CAMP-eXplain-AI/imba-explain
FocalLoss
false
2,040
[ "MIT" ]
0
e41b4ca5de63955cb0e925aad9599f38c5a3e973
https://github.com/CAMP-eXplain-AI/imba-explain/tree/e41b4ca5de63955cb0e925aad9599f38c5a3e973
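Everything in this record's forward pass collapses into the single persistent-reduction kernel above: the input has 4*4*4*4 = 256 elements, which fits in one RBLOCK of 256 lanes, so one program instance computes the element-wise loss, sums it, and divides by 256.0 for the mean. A hedged eager restatement (not from the record) of what that kernel evaluates, with alpha = 0.25 and gamma = 2.0 visible as the constants tmp13/tmp15 and the squared pt term (tmp23):

import torch
import torch.nn.functional as F

def focal_reference(pred, target, alpha=0.25, gamma=2.0):
    p = pred.sigmoid()
    pt = (1 - p) * target + p * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
    return (loss * focal_weight).mean()  # loss_weight = 1.0 leaves the mean unchanged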
CNN_MNIST
import torch import torch.nn as nn import torch.nn.functional as F class CNN_MNIST(nn.Module): def __init__(self, num_channels, num_classes): super(CNN_MNIST, self).__init__() self.conv1 = nn.Conv2d(num_channels, 32, 3, stride=1, padding=0) self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=0) self.conv3 = nn.Conv2d(64, 64, 3, stride=1, padding=0) self.pool1 = nn.MaxPool2d(2) self.pool2 = nn.MaxPool2d(2) self.fc1 = nn.Linear(3 * 3 * 64, 64, bias=True) self.fc2 = nn.Linear(64, num_classes) def forward(self, X): x = F.relu(self.conv1(X)) x = self.pool1(x) x = F.relu(self.conv2(x)) x = self.pool2(x) x = F.relu(self.conv3(x)) x = x.reshape(-1, 3 * 3 * 64) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'num_channels': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 492032 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 32 x0 = xindex % 3844 x4 = xindex // 3844 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = xindex // 31 % 31 x2 = xindex // 961 x5 = xindex x4 = xindex // 30752 x6 = xindex % 30752 tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 30848 * x4), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 215296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 841 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 % 14 x2 = xindex // 196 x3 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, 
tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 144 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 576), (576, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (4, 64), (64, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 32, 62, 62), (123904, 3872, 62, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(492032)](buf0, primals_2, buf1, 492032, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(123008)](buf1, buf2, buf3, 123008, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 29, 29), (53824, 841, 29, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(215296)](buf5, primals_5, 215296, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1), torch.float32) buf7 = empty_strided_cuda((4, 64, 14, 14), 
(12544, 196, 14, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(50176)](buf5, buf6, buf7, 50176, XBLOCK=256, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 12, 12), (9216, 144, 12, 1)) buf9 = buf8 del buf8 buf13 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_4[grid(36864)]( buf9, primals_7, buf13, 36864, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (64, 576), (576, 1), 0), reinterpret_tensor(primals_8, (576, 64), (1, 576), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(4096)](buf11, primals_9, 4096, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, reinterpret_tensor(buf9, (64, 576), (576, 1 ), 0), buf11, primals_10, primals_8, buf13) class CNN_MNISTNew(nn.Module): def __init__(self, num_channels, num_classes): super(CNN_MNISTNew, self).__init__() self.conv1 = nn.Conv2d(num_channels, 32, 3, stride=1, padding=0) self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=0) self.conv3 = nn.Conv2d(64, 64, 3, stride=1, padding=0) self.pool1 = nn.MaxPool2d(2) self.pool2 = nn.MaxPool2d(2) self.fc1 = nn.Linear(3 * 3 * 64, 64, bias=True) self.fc2 = nn.Linear(64, num_classes) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Billy1900/Noise-Adaption-Layer
CNN_MNIST
false
16,999
[ "MIT" ]
5
57b52dc4873f8eba7b8332db0ca3e593c2e3ffa8
https://github.com/Billy1900/Noise-Adaption-Layer/tree/57b52dc4873f8eba7b8332db0ca3e593c2e3ffa8
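The hard-coded strides and element counts in this record's kernels all follow from simple size arithmetic; a sketch (illustrative helpers, not from the record) that reproduces them:

def conv_out(s, k=3):
    # Spatial size after a valid (padding=0, stride=1) k x k convolution.
    return s - (k - 1)

def pool_out(s):
    # Spatial size after a 2x2 max pool with stride 2 (floor division).
    return s // 2

s = 64             # get_inputs() feeds 64x64 images
s = conv_out(s)    # 62 -> 3844 = 62*62 in triton_poi_fused_convolution_relu_0
s = pool_out(s)    # 31 ->  961 = 31*31 in the first max-pool kernel
s = conv_out(s)    # 29 ->  841 = 29*29 in triton_poi_fused_convolution_relu_2
s = pool_out(s)    # 14 ->  196 = 14*14 in the second max-pool kernel
s = conv_out(s)    # 12 ->  144 = 12*12 in the final conv kernel
print(s * s * 64)  # 9216 features per image, not the 576 that fc1 expects

Because 12*12*64 != 3*3*64, the `reshape(-1, 3 * 3 * 64)` folds spatial positions into the batch dimension for these 64x64 inputs, which is why buf12 in the compiled graph is (64, 4) rather than (4, 4); the 576-wide fc1 only lines up per-image for 28x28 MNIST inputs (28 -> 26 -> 13 -> 11 -> 5 -> 3).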
FCN32s
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/yh/cyhcthmybpwuhajc2prpx2buu2aaanp4xjhlfcddqe2a2vxploid.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 20 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 5 y1 = (yindex // 5) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (5*x2) + (20480*y1)), tmp0, ymask) ''', device_str='cuda') 
# kernel path: runs/run_shard_8/inductor_cache/ug/cugiuc33kwmzgnaeznqxdpmk7r65jz2veybmiu3ozf6tpjzo34kf.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 320 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 5 y1 = (yindex // 5) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (5*x2) + (45*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, 
math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ks/ckso6iiq5yfqfxmx7ilr6ufrmz6mlkiy75pexzhyf3ierq4pu3zl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 262144 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4i/c4islqctnux7quywor4ljttjc6krtgvecvzfsjd2pvp4i6z2bufb.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2097152 xnumel = 49 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (25088*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/md/cmdsupo5iyzolhpw6ahmecqsejr37jfeyzsgf7lnfmc3sylo6hn7.py # Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # h => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [100, 100], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = 
(%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 17572864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/z5/cz5w3p5clskvlxhaxinsnisy4dwdncan46mbp45aclwsvbmk3ale.py # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_2 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_11 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_max_pool2d_with_indices_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4393216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) % 131 x2 = (xindex // 8384) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (33536*x2)), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (33536*x2)), xmask) tmp3 = tl.load(in_ptr0 + (16768 + x0 + (128*x1) + (33536*x2)), xmask) tmp5 = tl.load(in_ptr0 + (16832 + x0 + (128*x1) + (33536*x2)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/qq/cqqgx4bd67gkdynuvs4ihlgjgq3bvd73tn2htvsxd4ersue7qkvs.py # Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # h_3 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8786432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sd/csdeiw4phywtem76xa34mjgxjkclvuzqzlzin7wo4erpcfmzh76t.py # Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_5 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_13 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 2230272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 8448) % 66 x1 = (xindex // 128) % 66 x0 = xindex % 128 x3 = (xindex // 557568) x6 = xindex tmp0 = 2*x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 131, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2*x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + (256*x1) + (33536*x2) + (2196608*x3)), 
tmp10, other=float("-inf")) tmp12 = 1 + (2*x1) tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp16, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + (2*x2) tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (16768 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp23, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (16896 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp26, other=float("-inf")) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + (x6), tmp28, None) tl.store(out_ptr1 + (x6), tmp38, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tb/ctb5lccopknr7xxr3evsl64ic5r4ufx4pjs6gjvlnwoorj5p2wfc.py # Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # h_6 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4460544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + 
(x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/c4/cc4vltulbg5vj3jzsdzvbqj7ho7giigov24snntuectxulb7wm7h.py # Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_9 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_15 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = (xindex // 256) % 33 x2 = (xindex // 8448) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (33792*x2)), xmask) tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (33792*x2)), xmask) tmp3 = tl.load(in_ptr0 + (16896 + x0 + (512*x1) + (33792*x2)), xmask) tmp5 = tl.load(in_ptr0 + (17152 + x0 + (512*x1) + (33792*x2)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uh/cuhmvwaq2epkqdsbcpyzl2cbecntikxzjbvwfqzaw27ipfpz7z7p.py # 
Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # h_10 => relu_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2230272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/p7/cp7mv7czuycllanudflpzvh5xztkpijbxdp27bonysvmd2pffhsx.py # Topologically Sorted Source Nodes: [h_13], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_13 => getitem_6, getitem_7 # Graph fragment: # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_17 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 591872 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 8704) % 17 x1 = (xindex // 512) % 17 x0 = xindex % 512 x3 = (xindex // 147968) x6 = xindex tmp0 = 2*x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 33, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2*x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp10, other=float("-inf")) tmp12 = 1 + (2*x1) tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp16, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + (2*x2) tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (16896 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp23, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (17408 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp26, other=float("-inf")) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + (x6), tmp28, None) tl.store(out_ptr1 + (x6), tmp38, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xq/cxqbn72jjnqokyo55zpsu52c6azgyekra26qjkgequt3brmypgu4.py # Topologically Sorted Source Nodes: [conv2d_10, h_14], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_10 => convolution_10 # h_14 => relu_10 # Graph fragment: # %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {}) triton_poi_fused_convolution_relu_18 = 
async_compile.triton('triton_poi_fused_convolution_relu_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 591872 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/xk/cxki4k5jlgybjbomie7ngclgckyb4jaorjcpddo3vcksb3nflpug.py # Topologically Sorted Source Nodes: [h_17], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # h_17 => getitem_8, getitem_9 # Graph fragment: # %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {}) # %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_19 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_19', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 165888 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 4608) % 9 x1 = (xindex // 512) % 9 x0 = xindex % 512 x3 = (xindex // 41472) x6 = xindex tmp0 = 2*x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 17, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2*x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp10, other=float("-inf")) tmp12 = 1 + (2*x1) tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp16, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + (2*x2) tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (8704 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp23, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (9216 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp26, other=float("-inf")) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + (x6), tmp28, None) tl.store(out_ptr1 + (x6), tmp38, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nj/cnjp6idq6vu3t4k5t2ashy54iuhcp2lh5vwz3433o5cf5r7fmdck.py # Topologically Sorted Source Nodes: [conv2d_13, h_18], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_13 => convolution_13 # h_18 => relu_13 # Graph fragment: # %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_28, %primals_29, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {}) triton_poi_fused_convolution_relu_20 = async_compile.triton('triton_poi_fused_convolution_relu_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 
'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 147456
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 4096
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/45/c45l55dp54hb7ihramfhxmgidblcal7beaongbuydibo2hdowmbj.py
# Topologically Sorted Source Nodes: [h_22], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   h_22 => convolution_15
# Graph fragment:
#   %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_14, %primals_32, %primals_33, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_21 = async_compile.triton('triton_poi_fused_convolution_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 36
    xoffset = tl.program_id(0) * XBLOCK
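    # Descriptive note: this epilogue kernel finishes the h_22 node by adding
    # the single output-channel bias (primals_33, shape (1,)) in place to the
    # (4, 1, 3, 3) result of the 1x1 convolution, hence xnumel = 36 = 4*1*3*3.
    # The lone bias value is loaded once below and broadcast across the block.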
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/wq/cwqrpbigoqv3g4hr6shnaizyxbgw4cquq2tci2levhsi7cjh22nc.py
# Topologically Sorted Source Nodes: [h_24], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   h_24 => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_22 = async_compile.triton('triton_poi_fused_clone_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_22(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 64
    x1 = (xindex // 64) % 64
    x2 = (xindex // 4096)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (2451 + x0 + (128*x1) + (16384*x2)), None)
    tl.store(out_ptr0 + (x3), tmp0, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34 = args
    args.clear()
    assert_size_stride(primals_1, (4, 5, 64, 64), (20480, 4096, 64, 1))
    assert_size_stride(primals_2, (64, 5, 3, 3), (45, 9, 3, 1))
    assert_size_stride(primals_3, (64, ), (1, ))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64, ), (1, ))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128, ), (1, ))
    assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (128, ), (1, ))
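    # Note on this parameter inventory (an interpretation, not asserted by the
    # generated code itself): the shapes are consistent with an FCN-32s-style
    # head on a VGG16-like backbone taking 5 input channels -- primals_2..27
    # are the 3x3 conv weights/biases, primals_28/29 an fc6 realised as a 7x7
    # convolution, primals_30/31 an fc7 as a 1x1 convolution, primals_32/33 a
    # 1-channel score convolution, and primals_34 the fixed (1, 1, 64, 64)
    # filter used by the stride-32 transposed convolution at the end.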
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512, ), (1, )) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512, ), (1, )) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512, ), (1, )) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512, ), (1, )) assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1)) assert_size_stride(primals_29, (4096, ), (1, )) assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_31, (4096, ), (1, )) assert_size_stride(primals_32, (1, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_33, (1, ), (1, )) assert_size_stride(primals_34, (1, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5, 64, 64), (20480, 1, 320, 5), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 20, 4096, grid=grid(20, 4096), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 5, 3, 3), (45, 1, 15, 5), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_2, buf1, 320, 9, grid=grid(320, 9), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_10, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_12, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_14, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_7.run(primals_16, buf8, 131072, 9, grid=grid(131072, 9), stream=stream0) del primals_16 buf9 
= empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_18, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_20, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_22, buf11, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_24, buf12, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_26, buf13, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_26 buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_9.run(primals_28, buf14, 2097152, 49, grid=grid(2097152, 49), stream=stream0) del primals_28 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(100, 100), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 262, 262), (4393216, 1, 16768, 64)) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_10.run(buf16, primals_3, 17572864, grid=grid(17572864), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 262, 262), (4393216, 1, 16768, 64)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [conv2d_1, h_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_10.run(buf18, primals_5, 17572864, grid=grid(17572864), stream=stream0) del primals_5 buf19 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.float32) buf20 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.int8) # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_11.run(buf18, buf19, buf20, 4393216, grid=grid(4393216), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 131, 131), (2196608, 1, 16768, 128)) buf22 = buf21; del buf21 # reuse # Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_12.run(buf22, primals_7, 8786432, grid=grid(8786432), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf23 
= extern_kernels.convolution(buf22, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf23, (4, 128, 131, 131), (2196608, 1, 16768, 128))
        buf24 = buf23; del buf23  # reuse
        # Topologically Sorted Source Nodes: [conv2d_3, h_4], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_12.run(buf24, primals_9, 8786432, grid=grid(8786432), stream=stream0)
        del primals_9
        buf25 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.float32)
        buf26 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.int8)
        # Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_13.run(buf24, buf25, buf26, 2230272, grid=grid(2230272), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
        buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf27, (4, 256, 66, 66), (1115136, 1, 16896, 256))
        buf28 = buf27; del buf27  # reuse
        # Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_14.run(buf28, primals_11, 4460544, grid=grid(4460544), stream=stream0)
        del primals_11
        # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
        buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf29, (4, 256, 66, 66), (1115136, 1, 16896, 256))
        buf30 = buf29; del buf29  # reuse
        # Topologically Sorted Source Nodes: [conv2d_5, h_7], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_14.run(buf30, primals_13, 4460544, grid=grid(4460544), stream=stream0)
        del primals_13
        # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
        buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf31, (4, 256, 66, 66), (1115136, 1, 16896, 256))
        buf32 = buf31; del buf31  # reuse
        # Topologically Sorted Source Nodes: [conv2d_6, h_8], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_14.run(buf32, primals_15, 4460544, grid=grid(4460544), stream=stream0)
        del primals_15
        buf33 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.float32)
        buf34 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.int8)
        # Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_15.run(buf32, buf33, buf34, 1115136, grid=grid(1115136), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
        buf35 = extern_kernels.convolution(buf33, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf35, (4, 512, 33, 33), (557568, 1, 16896, 512))
        buf36 = buf35; del buf35  # reuse
        # Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_16.run(buf36, primals_17, 2230272, grid=grid(2230272), stream=stream0)
        del primals_17
        # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
        buf37 = extern_kernels.convolution(buf36, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf37, (4, 512, 33, 33), (557568, 1, 16896, 512))
        buf38 = buf37; del buf37  # reuse
        # Topologically Sorted Source Nodes: [conv2d_8, h_11], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_16.run(buf38, primals_19, 2230272, grid=grid(2230272), stream=stream0)
        del primals_19
        # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
        buf39 = extern_kernels.convolution(buf38, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf39, (4, 512, 33, 33), (557568, 1, 16896, 512))
        buf40 = buf39; del buf39  # reuse
        # Topologically Sorted Source Nodes: [conv2d_9, h_12], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_16.run(buf40, primals_21, 2230272, grid=grid(2230272), stream=stream0)
        del primals_21
        buf41 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.float32)
        buf42 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.int8)
        # Topologically Sorted Source Nodes: [h_13], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_17.run(buf40, buf41, buf42, 591872, grid=grid(591872), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
        buf43 = extern_kernels.convolution(buf41, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf43, (4, 512, 17, 17), (147968, 1, 8704, 512))
        buf44 = buf43; del buf43  # reuse
        # Topologically Sorted Source Nodes: [conv2d_10, h_14], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_18.run(buf44, primals_23, 591872, grid=grid(591872), stream=stream0)
        del primals_23
        # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
        buf45 = extern_kernels.convolution(buf44, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf45, (4, 512, 17, 17), (147968, 1, 8704, 512))
        buf46 = buf45; del buf45  # reuse
        # Topologically Sorted Source Nodes: [conv2d_11, h_15], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_18.run(buf46, primals_25, 591872, grid=grid(591872), stream=stream0)
        del primals_25
        # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
        buf47 = extern_kernels.convolution(buf46, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf47, (4, 512, 17, 17), (147968, 1, 8704, 512))
        buf48 = buf47; del buf47  # reuse
        # Topologically Sorted Source Nodes: [conv2d_12, h_16], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_18.run(buf48, primals_27, 591872, grid=grid(591872), stream=stream0)
        del primals_27
        buf49 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.float32)
        buf50 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.int8)
        # Topologically Sorted Source Nodes: [h_17], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_19.run(buf48, buf49, buf50, 165888, grid=grid(165888), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
        buf51 = extern_kernels.convolution(buf49, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf51, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
        buf52 = buf51; del buf51  # reuse
        # Topologically Sorted Source Nodes: [conv2d_13, h_18], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_20.run(buf52, primals_29, 147456, grid=grid(147456), stream=stream0)
        del primals_29
        # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
        buf53 = extern_kernels.convolution(buf52, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
        buf54 = buf53; del buf53  # reuse
        # Topologically Sorted Source Nodes: [conv2d_14, h_20], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_20.run(buf54, primals_31, 147456, grid=grid(147456), stream=stream0)
        del primals_31
        # Topologically Sorted Source Nodes: [h_22], Original ATen: [aten.convolution]
        buf55 = extern_kernels.convolution(buf54, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf55, (4, 1, 3, 3), (9, 1, 3, 1))
        buf56 = buf55; del buf55  # reuse
        # Topologically Sorted Source Nodes: [h_22], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_21.run(buf56, primals_33, 36, grid=grid(36), stream=stream0)
        del primals_33
        # Topologically Sorted Source Nodes: [h_23], Original ATen: [aten.convolution]
        buf57 = extern_kernels.convolution(buf56, primals_34, stride=(32, 32), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf57, (4, 1, 128, 128), (16384, 1, 128, 1))
        buf58 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32)
        # Topologically Sorted Source Nodes: [h_24], Original ATen: [aten.clone]
        triton_poi_fused_clone_22.run(buf57, buf58, 16384, grid=grid(16384), stream=stream0)
        del buf57
    return (buf58, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32, primals_34, buf16, buf18, buf19, buf20, buf22, buf24, buf25, buf26, buf28, buf30, buf32, buf33, buf34, buf36, buf38, buf40, buf41, buf42, buf44, buf46, buf48, buf49, buf50, buf52, buf54, buf56, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 5, 64, 64), (20480, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((64, 5, 3, 3), (45, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_28 = rand_strided((4096, 512, 7, 7), (25088, 49, 7, 1), device='cuda:0', dtype=torch.float32)
    primals_29 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_30 = rand_strided((4096, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_31 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_32 = rand_strided((1, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_33 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_34 = rand_strided((1, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
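An editorial sketch of how one might sanity-check an entry like this, not part of the original dump: it assumes a CUDA device, that FCN32s and call come from the two code fields of this record, and that model.parameters() yields weights in the same order as primals_2..primals_34 (attribute-definition order).

import torch

def check_against_eager(model, x):
    # eval() turns the Dropout2d layers into identity, matching the generated graph
    model = model.eval()
    params = [p.detach() for p in model.parameters()]
    out_compiled = call([x] + params)[0]  # call() consumes its argument list, so build a fresh one
    out_eager = model(x)
    # tolerate small numeric drift from TF32 and kernel fusion
    return torch.allclose(out_compiled, out_eager, atol=1e-4, rtol=1e-4)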
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 20
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 5
    y1 = yindex // 5
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 5 * x2 + 20480 * y1), tmp0, ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 320
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 5
    y1 = yindex // 5
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 5 * x2 + 45 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 49
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 25088 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 17572864
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4393216
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x1 = xindex // 64 % 131
    x2 = xindex // 8384
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 33536 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 33536 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (16768 + x0 + 128 * x1 + 33536 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (16832 + x0 + 128 * x1 + 33536 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 8786432
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 8448 % 66
    x1 = xindex // 128 % 66
    x0 = xindex % 128
    x3 = xindex // 557568
    x6 = xindex
    tmp0 = 2 * x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 131, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = 2 * x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp10, other=float('-inf'))
    tmp12 = 1 + 2 * x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp16, other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x2
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp22 & tmp9
    tmp24 = tl.load(in_ptr0 + (16768 + x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp23, other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = tmp22 & tmp15
    tmp27 = tl.load(in_ptr0 + (16896 + x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp26, other=float('-inf'))
    tmp28 = triton_helpers.maximum(tmp27, tmp25)
    tmp29 = tmp17 > tmp11
    tmp30 = tl.full([1], 1, tl.int8)
    tmp31 = tl.full([1], 0, tl.int8)
    tmp32 = tl.where(tmp29, tmp30, tmp31)
    tmp33 = tmp24 > tmp18
    tmp34 = tl.full([1], 2, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp27 > tmp25
    tmp37 = tl.full([1], 3, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tl.store(out_ptr0 + x6, tmp28, None)
    tl.store(out_ptr1 + x6, tmp38, None)


@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1115136
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 256
    x1 = xindex // 256 % 33
    x2 = xindex // 8448
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 33792 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 33792 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 33792 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (17152 + x0 + 512 * x1 + 33792 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 8704 % 17
    x1 = xindex // 512 % 17
    x0 = xindex % 512
    x3 = xindex // 147968
    x6 = xindex
    tmp0 = 2 * x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 33, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = 2 * x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp10, other=float('-inf'))
    tmp12 = 1 + 2 * x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp16, other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x2
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp22 & tmp9
    tmp24 = tl.load(in_ptr0 + (16896 + x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp23, other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = tmp22 & tmp15
    tmp27 = tl.load(in_ptr0 + (17408 + x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp26, other=float('-inf'))
    tmp28 = triton_helpers.maximum(tmp27, tmp25)
    tmp29 = tmp17 > tmp11
    tmp30 = tl.full([1], 1, tl.int8)
    tmp31 = tl.full([1], 0, tl.int8)
    tmp32 = tl.where(tmp29, tmp30, tmp31)
    tmp33 = tmp24 > tmp18
    tmp34 = tl.full([1], 2, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp27 > tmp25
    tmp37 = tl.full([1], 3, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tl.store(out_ptr0 + x6, tmp28, None)
    tl.store(out_ptr1 + x6, tmp38, None)


@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 4608 % 9
    x1 = xindex // 512 % 9
    x0 = xindex % 512
    x3 = xindex // 41472
    x6 = xindex
    tmp0 = 2 * x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 17, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = 2 * x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp10, other=float('-inf'))
    tmp12 = 1 + 2 * x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp16, other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x2
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp22 & tmp9
    tmp24 = tl.load(in_ptr0 + (8704 + x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp23, other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = tmp22 & tmp15
    tmp27 = tl.load(in_ptr0 + (9216 + x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp26, other=float('-inf'))
    tmp28 = triton_helpers.maximum(tmp27, tmp25)
    tmp29 = tmp17 > tmp11
    tmp30 = tl.full([1], 1, tl.int8)
    tmp31 = tl.full([1], 0, tl.int8)
    tmp32 = tl.where(tmp29, tmp30, tmp31)
    tmp33 = tmp24 > tmp18
    tmp34 = tl.full([1], 2, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp27 > tmp25
    tmp37 = tl.full([1], 3, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tl.store(out_ptr0 + x6, tmp28, None)
    tl.store(out_ptr1 + x6, tmp38, None)


@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 4096
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 36
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_poi_fused_clone_22(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 64
    x1 = xindex // 64 % 64
    x2 = xindex // 4096
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (2451 + x0 + 128 * x1 + 16384 * x2), None)
    tl.store(out_ptr0 + x3, tmp0, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22, primals_23, primals_24, primals_25, primals_26,
        primals_27, primals_28, primals_29, primals_30, primals_31,
        primals_32, primals_33, primals_34) = args
    args.clear()
    assert_size_stride(primals_1, (4, 5, 64, 64), (20480, 4096, 64, 1))
    assert_size_stride(primals_2, (64, 5, 3, 3), (45, 9, 3, 1))
    assert_size_stride(primals_3, (64,), (1,))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (128,), (1,))
    assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_11, (256,), (1,))
    assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_13, (256,), (1,))
    assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_15, (256,), (1,))
    assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (512,), (1,))
    assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_19, (512,), (1,))
    assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_21, (512,), (1,))
    assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_23, (512,), (1,))
    assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_25, (512,), (1,))
    assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_27, (512,), (1,))
    assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1))
    assert_size_stride(primals_29, (4096,), (1,))
    assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1))
    assert_size_stride(primals_31, (4096,), (1,))
    assert_size_stride(primals_32, (1, 4096, 1, 1), (4096, 1, 1, 1))
    assert_size_stride(primals_33, (1,), (1,))
    assert_size_stride(primals_34, (1, 1, 64, 64), (4096, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 5, 64, 64), (20480, 1, 320, 5), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(20, 4096)](primals_1, buf0, 20, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((64, 5, 3, 3), (45, 1, 15, 5), torch.float32)
        triton_poi_fused_1[grid(320, 9)](primals_2, buf1, 320, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
        triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
        triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
        triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
        triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_10
        buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_12
        buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_14
        buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_16
        buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_18
        buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_20
        buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_22
        buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_24
        buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_26
        buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512), torch.float32)
        triton_poi_fused_9[grid(2097152, 49)](primals_28, buf14, 2097152, 49, XBLOCK=32, YBLOCK=64, num_warps=8, num_stages=1)
        del primals_28
        buf15 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(100, 100), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf15, (4, 64, 262, 262), (4393216, 1, 16768, 64))
        buf16 = buf15
        del buf15
        triton_poi_fused_convolution_relu_10[grid(17572864)](buf16, primals_3, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_3
        buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 64, 262, 262), (4393216, 1, 16768, 64))
        buf18 = buf17
        del buf17
        triton_poi_fused_convolution_relu_10[grid(17572864)](buf18, primals_5, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_5
        buf19 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.float32)
        buf20 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_11[grid(4393216)](buf18, buf19, buf20, 4393216, XBLOCK=512, num_warps=8, num_stages=1)
        buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf21, (4, 128, 131, 131), (2196608, 1, 16768, 128))
        buf22 = buf21
        del buf21
        triton_poi_fused_convolution_relu_12[grid(8786432)](buf22, primals_7, 8786432, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_7
        buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf23, (4, 128, 131, 131), (2196608, 1, 16768, 128))
        buf24 = buf23
        del buf23
        triton_poi_fused_convolution_relu_12[grid(8786432)](buf24, primals_9, 8786432, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_9
        buf25 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.float32)
        buf26 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_13[grid(2230272)](buf24, buf25, buf26, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
        buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf27, (4, 256, 66, 66), (1115136, 1, 16896, 256))
        buf28 = buf27
        del buf27
        triton_poi_fused_convolution_relu_14[grid(4460544)](buf28, primals_11, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_11
        buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf29, (4, 256, 66, 66), (1115136, 1, 16896, 256))
        buf30 = buf29
        del buf29
        triton_poi_fused_convolution_relu_14[grid(4460544)](buf30, primals_13, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_13
        buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf31, (4, 256, 66, 66), (1115136, 1, 16896, 256))
        buf32 = buf31
        del buf31
        triton_poi_fused_convolution_relu_14[grid(4460544)](buf32, primals_15, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_15
        buf33 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.float32)
        buf34 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_15[grid(1115136)](buf32, buf33, buf34, 1115136, XBLOCK=512, num_warps=8, num_stages=1)
        buf35 = extern_kernels.convolution(buf33, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf35, (4, 512, 33, 33), (557568, 1, 16896, 512))
        buf36 = buf35
        del buf35
        triton_poi_fused_convolution_relu_16[grid(2230272)](buf36, primals_17, 2230272, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_17
        buf37 = extern_kernels.convolution(buf36, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf37, (4, 512, 33, 33), (557568, 1, 16896, 512))
        buf38 = buf37
        del buf37
        triton_poi_fused_convolution_relu_16[grid(2230272)](buf38, primals_19, 2230272, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_19
        buf39 = extern_kernels.convolution(buf38, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf39, (4, 512, 33, 33), (557568, 1, 16896, 512))
        buf40 = buf39
        del buf39
        triton_poi_fused_convolution_relu_16[grid(2230272)](buf40, primals_21, 2230272, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_21
        buf41 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.float32)
        buf42 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_17[grid(591872)](buf40, buf41, buf42, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
        buf43 = extern_kernels.convolution(buf41, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf43, (4, 512, 17, 17), (147968, 1, 8704, 512))
        buf44 = buf43
        del buf43
        triton_poi_fused_convolution_relu_18[grid(591872)](buf44, primals_23, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_23
        buf45 = extern_kernels.convolution(buf44, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf45, (4, 512, 17, 17), (147968, 1, 8704, 512))
        buf46 = buf45
        del buf45
        triton_poi_fused_convolution_relu_18[grid(591872)](buf46, primals_25, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_25
        buf47 = extern_kernels.convolution(buf46, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf47, (4, 512, 17, 17), (147968, 1, 8704, 512))
        buf48 = buf47
        del buf47
        triton_poi_fused_convolution_relu_18[grid(591872)](buf48, primals_27, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_27
        buf49 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.float32)
        buf50 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_19[grid(165888)](buf48, buf49, buf50, 165888, XBLOCK=512, num_warps=8, num_stages=1)
        buf51 = extern_kernels.convolution(buf49, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf51, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
        buf52 = buf51
        del buf51
        triton_poi_fused_convolution_relu_20[grid(147456)](buf52, primals_29, 147456, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_29
        buf53 = extern_kernels.convolution(buf52, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
        buf54 = buf53
        del buf53
        triton_poi_fused_convolution_relu_20[grid(147456)](buf54, primals_31, 147456, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_31
        buf55 = extern_kernels.convolution(buf54, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf55, (4, 1, 3, 3), (9, 1, 3, 1))
        buf56 = buf55
        del buf55
        triton_poi_fused_convolution_21[grid(36)](buf56, primals_33, 36, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_33
        buf57 = extern_kernels.convolution(buf56, primals_34, stride=(32, 32), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf57, (4, 1, 128, 128), (16384, 1, 128, 1))
        buf58 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32)
        triton_poi_fused_clone_22[grid(16384)](buf57, buf58, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del buf57
    return (buf58, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
        buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32,
        primals_34, buf16, buf18, buf19, buf20, buf22, buf24, buf25, buf26,
        buf28, buf30, buf32, buf33, buf34, buf36, buf38, buf40, buf41,
        buf42, buf44, buf46, buf48, buf49, buf50, buf52, buf54, buf56)


def get_upsampling_weight(in_channels, out_channels, kernel_size):
    """Make a 2D bilinear kernel suitable for upsampling"""
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)
    weight[range(in_channels), range(out_channels), :, :] = filt
    return torch.from_numpy(weight).float()


class FCN32sNew(nn.Module):

    def __init__(self, n_channels=5, n_class=1):
        super(FCN32sNew, self).__init__()
        self.conv1_1 = nn.Conv2d(5, 64, 3, padding=100)
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_2 = nn.ReLU(inplace=True)
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_3 = nn.ReLU(inplace=True)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_1 = nn.ReLU(inplace=True)
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_2 = nn.ReLU(inplace=True)
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_3 = nn.ReLU(inplace=True)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.fc6 = nn.Conv2d(512, 4096, 7)
        self.relu6 = nn.ReLU(inplace=True)
        self.drop6 = nn.Dropout2d()
        self.fc7 = nn.Conv2d(4096, 4096, 1)
        self.relu7 = nn.ReLU(inplace=True)
        self.drop7 = nn.Dropout2d()
        self.score_fr = nn.Conv2d(4096, n_class, 1)
        self.upscore = nn.ConvTranspose2d(n_class, n_class, 64, stride=32, bias=False)
        self._initialize_weights()

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.zero_()
                if m.bias is not None:
                    m.bias.data.zero_()
            if isinstance(m, nn.ConvTranspose2d):
                assert m.kernel_size[0] == m.kernel_size[1]
                initial_weight = get_upsampling_weight(m.in_channels, m.out_channels, m.kernel_size[0])
                m.weight.data.copy_(initial_weight)

    def copy_params_from_vgg16(self, vgg16):
        features = [self.conv1_1, self.relu1_1, self.conv1_2, self.relu1_2,
            self.pool1, self.conv2_1, self.relu2_1, self.conv2_2, self.
            relu2_2, self.pool2, self.conv3_1, self.relu3_1, self.conv3_2,
            self.relu3_2, self.conv3_3, self.relu3_3, self.pool3, self.
            conv4_1, self.relu4_1, self.conv4_2, self.relu4_2, self.conv4_3,
            self.relu4_3, self.pool4, self.conv5_1, self.relu5_1, self.
            conv5_2, self.relu5_2, self.conv5_3, self.relu5_3, self.pool5]
        for l1, l2 in zip(vgg16.features, features):
            if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
                assert l1.weight.size() == l2.weight.size()
                assert l1.bias.size() == l2.bias.size()
                l2.weight.data = l1.weight.data
                l2.bias.data = l1.bias.data
        for i, name in zip([0, 3], ['fc6', 'fc7']):
            l1 = vgg16.classifier[i]
            l2 = getattr(self, name)
            l2.weight.data = l1.weight.data.view(l2.weight.size())
            l2.bias.data = l1.bias.data.view(l2.bias.size())

    def forward(self, input_0):
        primals_2 = self.conv1_1.weight
        primals_3 = self.conv1_1.bias
        primals_4 = self.conv1_2.weight
        primals_5 = self.conv1_2.bias
        primals_6 = self.conv2_1.weight
        primals_7 = self.conv2_1.bias
        primals_8 = self.conv2_2.weight
        primals_9 = self.conv2_2.bias
        primals_10 = self.conv3_1.weight
        primals_11 = self.conv3_1.bias
        primals_12 = self.conv3_2.weight
        primals_13 = self.conv3_2.bias
        primals_14 = self.conv3_3.weight
        primals_15 = self.conv3_3.bias
        primals_16 = self.conv4_1.weight
        primals_17 = self.conv4_1.bias
        primals_18 = self.conv4_2.weight
        primals_19 = self.conv4_2.bias
        primals_20 = self.conv4_3.weight
        primals_21 = self.conv4_3.bias
        primals_22 = self.conv5_1.weight
        primals_23 = self.conv5_1.bias
        primals_24 = self.conv5_2.weight
        primals_25 = self.conv5_2.bias
        primals_26 = self.conv5_3.weight
        primals_27 = self.conv5_3.bias
        primals_28 = self.fc6.weight
        primals_29 = self.fc6.bias
        primals_30 = self.fc7.weight
        primals_31 = self.fc7.bias
        primals_32 = self.score_fr.weight
        primals_33 = self.score_fr.bias
        primals_34 = self.upscore.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34])
        return output[0]
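For orientation, a minimal editorial usage sketch (not part of the original entry; it assumes a CUDA device and the shapes used by the benchmark inputs above):

# net = FCN32sNew(n_channels=5, n_class=1).cuda()
# x = torch.rand(4, 5, 64, 64, device='cuda')
# y = net(x)  # (4, 1, 64, 64): a one-channel score map at the input resolution
# get_upsampling_weight(1, 1, 64) yields the (1, 1, 64, 64) bilinear kernel
# that initializes self.upscore -- the same shape as primals_34.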
Yusoi/mmdetection
FCN32s
false
9,960
[ "Apache-2.0" ]
0
cbb5fb00f6e124fbb2c15e7e3438d7fa76b8850a
https://github.com/Yusoi/mmdetection/tree/cbb5fb00f6e124fbb2c15e7e3438d7fa76b8850a
SAM_Module
import torch
import torch.nn as nn
from torchvision.transforms import *


class SAM_Module(nn.Module):
    """ Spatial attention module (SAM)"""

    def __init__(self, channels):
        super(SAM_Module, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.conv_after_concat = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1)
        self.sigmoid_spatial = nn.Sigmoid()

    def forward(self, x):
        """
            inputs :
                x : input feature maps ( B X C X H X W)
            returns :
                out : input feature scaled by the spatial attention map
                attention map : B X 1 X H X W
        """
        module_input = x
        avg = torch.mean(x, 1, True)
        x = self.conv_after_concat(avg)
        x = self.sigmoid_spatial(x)
        x = module_input * x
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4}]
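A brief editorial note on what the module computes (shapes follow get_inputs above; not part of the original entry):

# m = SAM_Module(channels=4)
# x = torch.rand(4, 4, 4, 4)
# out = m(x)  # same shape as x
# Equivalent elementwise form:
#   out = x * sigmoid(conv3x3(mean(x, dim=1, keepdim=True)))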
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torchvision.transforms import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + x3, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_sigmoid_2[grid(256)](primals_1, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf3, primals_1, primals_2, buf0, buf2


class SAM_ModuleNew(nn.Module):
    """ Spatial attention module (SAM)"""

    def __init__(self, channels):
        super(SAM_ModuleNew, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.conv_after_concat = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1)
        self.sigmoid_spatial = nn.Sigmoid()

    def forward(self, input_0):
        primals_2 = self.conv_after_concat.weight
        primals_3 = self.conv_after_concat.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
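The fused version can be exercised the same way (editorial sketch, not part of the original entry; it requires a CUDA device since call() launches Triton kernels):

# m = SAM_ModuleNew(channels=4).cuda()
# x = torch.rand(4, 4, 4, 4, device='cuda')
# out = m(x)  # channel mean (triton_poi_fused_mean_0), 3x3 conv, then one fused sigmoid-multiply kernel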
Vill-Lab/IGOAS
SAM_Module
false
18,043
[ "MIT" ]
8
42ca1d45e441f993c95b5e8f33c9f97ea3b916f3
https://github.com/Vill-Lab/IGOAS/tree/42ca1d45e441f993c95b5e8f33c9f97ea3b916f3
LinearAdditiveUpsample
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/eq/ceqd4v2pibbxxx65uo2foiehgfuw64kx77b4cfkmooflc3b45zik.py
# Topologically Sorted Source Nodes: [resizing_layer], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
# Source node to ATen node mapping:
#   resizing_layer => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_10, add_11, add_4, add_6, add_7, add_8, add_9, clamp_max_3, clamp_max_4, clamp_max_5, clamp_min_2, clamp_min_3, clamp_min_4, clamp_min_5, convert_element_type_1, convert_element_type_3, convert_element_type_4, convert_element_type_5, iota_2, mul_2, mul_3, mul_4, mul_5, mul_6, mul_7, mul_8, mul_9, sub_10, sub_11, sub_12, sub_2, sub_3, sub_4, sub_5, sub_6, sub_7, sub_8, sub_9
# Graph fragment:
#   %convert_element_type_1 : [num_users=6] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
#   %convert_element_type_3 : [num_users=6] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_1, torch.int64), kwargs = {})
#   %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.5), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, 1.0), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, 0.5), kwargs = {})
#   %clamp_min_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
#   %convert_element_type_5 : [num_users=6] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_2, torch.int64), kwargs = {})
#   %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1, %clamp_max_2]), kwargs = {})
#   %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1, %convert_element_type_5]), kwargs = {})
#   %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_2, %convert_element_type_5), kwargs = {})
#   %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
#   %clamp_max_3 : [num_users=4] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
#   %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %clamp_max_3), kwargs = {})
#   %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_6), kwargs = {})
#   %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3, %clamp_max_2]), kwargs = {})
#   %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3, %convert_element_type_5]), kwargs = {})
#   %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
#   %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
#   %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_5), kwargs = {})
#   %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %add_8), kwargs = {})
#   %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %convert_element_type_3), kwargs = {})
#   %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_8, 0.0), kwargs = {})
#   %clamp_max_4 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_4, 1.0), kwargs = {})
#   %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_4), kwargs = {})
#   %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_8), kwargs = {})
#   %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1, %clamp_max_2]), kwargs = {})
#   %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1, %convert_element_type_5]), kwargs = {})
#   %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %clamp_max_3), kwargs = {})
#   %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_4), kwargs = {})
#   %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3, %clamp_max_2]), kwargs = {})
#   %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3, %convert_element_type_5]), kwargs = {})
#   %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
#   %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_3), kwargs = {})
#   %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_7, %add_6), kwargs = {})
#   %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %clamp_max_4), kwargs = {})
#   %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_7), kwargs = {})
#   %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_11, %add_10), kwargs = {})
#   %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
#   %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_11, 0.0), kwargs = {})
#   %clamp_max_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_5, 1.0), kwargs = {})
#   %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %clamp_max_5), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_out_ptr1, in_out_ptr3, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = (xindex // 16) % 4
    x1 = (xindex // 4) % 4
    x0 = xindex % 4
    x3 = (xindex // 64)
    x6 = xindex
    tmp0 = x2
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = 1.0
    tmp5 = tmp3 * tmp4
    tmp6 = tmp5 - tmp2
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp8.to(tl.int32)
    tmp10 = x1
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp11 + tmp2
    tmp13 = tmp12 * tmp4
    tmp14 = tmp13 - tmp2
    tmp15 = triton_helpers.maximum(tmp14, tmp7)
    tmp16 = tmp15.to(tl.int32)
    tmp17 = tl.full([1], 1, tl.int64)
    tmp18 = tmp16 + tmp17
    tmp19 = tl.full([1], 3, tl.int64)
    tmp20 = triton_helpers.minimum(tmp18, tmp19)
    tmp21 = x0
    tmp22 = tmp21.to(tl.float32)
    tmp23 = tmp22 + tmp2
    tmp24 = tmp23 * tmp4
    tmp25 = tmp24 - tmp2
    tmp26 = triton_helpers.maximum(tmp25, tmp7)
    tmp27 = tmp26.to(tl.int32)
    tmp28 = tmp27 + tmp17
    tmp29 = triton_helpers.minimum(tmp28, tmp19)
    tmp30 = tl.load(in_ptr0 + (tmp29 + (4*tmp20) + (16*tmp9) + (64*x3)), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr0 + (tmp27 + (4*tmp20) + (16*tmp9) + (64*x3)), xmask, eviction_policy='evict_last')
    tmp32 = tmp30 - tmp31
    tmp33 = tmp27.to(tl.float32)
    tmp34 = tmp26 - tmp33
    tmp35 = triton_helpers.maximum(tmp34, tmp7)
    tmp36 = triton_helpers.minimum(tmp35, tmp4)
    tmp37 = tmp32 * tmp36
    tmp38 = tmp31 + tmp37
    tmp39 = tl.load(in_ptr0 + (tmp29 + (4*tmp16) + (16*tmp9) + (64*x3)), xmask, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr0 + (tmp27 + (4*tmp16) + (16*tmp9) + (64*x3)), xmask, eviction_policy='evict_last')
    tmp41 = tmp39 - tmp40
    tmp42 = tmp41 * tmp36
    tmp43 = tmp40 + tmp42
    tmp44 = tmp9 + tmp17
    tmp45 = triton_helpers.minimum(tmp44, tmp19)
    tmp46 = tl.load(in_ptr0 + (tmp29 + (4*tmp20) + (16*tmp45) + (64*x3)), xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr0 + (tmp27 + (4*tmp20) + (16*tmp45) + (64*x3)), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr0 + (tmp29 + (4*tmp16) + (16*tmp45) + (64*x3)), xmask, eviction_policy='evict_last')
    tmp49 = tl.load(in_ptr0 + (tmp27 + (4*tmp16) + (16*tmp45) + (64*x3)), xmask, eviction_policy='evict_last')
    tmp50 = tmp48 - tmp49
    tmp51 = tmp50 * tmp36
    tmp52 = tmp49 + tmp51
    tmp53 = tmp46 - tmp47
    tmp54 = tmp53 * tmp36
    tmp55 = tmp47 + tmp54
    tmp56 = tmp55 - tmp52
    tmp57 = tmp16.to(tl.float32)
    tmp58 = tmp15 - tmp57
    tmp59 = triton_helpers.maximum(tmp58, tmp7)
    tmp60 = triton_helpers.minimum(tmp59, tmp4)
    tmp61 = tmp56 * tmp60
    tmp62 = tmp52 + tmp61
    tmp63 = tmp38 - tmp43
    tmp64 = tmp63 * tmp60
    tmp65 = tmp43 + tmp64
    tmp66 = tmp62 - tmp65
    tmp67 = tmp9.to(tl.float32)
    tmp68 = tmp8 - tmp67
    tmp69 = triton_helpers.maximum(tmp68, tmp7)
    tmp70 = triton_helpers.minimum(tmp69, tmp4)
    tmp71 = tmp66 * tmp70
    tl.store(in_out_ptr0 + (x6), tmp38, xmask)
    tl.store(in_out_ptr1 + (x6), tmp43, xmask)
    tl.store(in_out_ptr3 + (x6), tmp71, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/q7/cq7a57jaykep2rn5amyxkdznm2ecmdbkbyxbbpppwdsky6lbmylr.py
# Topologically Sorted Source Nodes: [output_tensor], Original ATen: [aten.sum]
# Source node to ATen node mapping:
#   output_tensor => sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_3, [2]), kwargs = {})
triton_poi_fused_sum_1 = async_compile.triton('triton_poi_fused_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = (xindex // 64)
    x4 = xindex % 64
    x1 = (xindex // 4) % 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (x4 + (256*x3)), xmask)
    tmp1 = tl.load(in_ptr1 + (x4 + (256*x3)), xmask)
    tmp19 = tl.load(in_ptr2 + (x4 + (256*x3)), xmask)
    tmp21 = tl.load(in_ptr0 + (64 + x4 + (256*x3)), xmask)
    tmp22 = tl.load(in_ptr1 + (64 + x4 + (256*x3)), xmask)
    tmp26 = tl.load(in_ptr2 + (64 + x4 + (256*x3)), xmask)
    tmp29 = tl.load(in_ptr0 + (128 + x4 + (256*x3)), xmask)
    tmp30 = tl.load(in_ptr1 + (128 + x4 + (256*x3)), xmask)
    tmp34 = tl.load(in_ptr2 + (128 + x4 + (256*x3)), xmask)
    tmp37 = tl.load(in_ptr0 + (192 + x4 + (256*x3)), xmask)
    tmp38 = tl.load(in_ptr1 + (192 + x4 + (256*x3)), xmask)
    tmp42 = tl.load(in_ptr2 + (192 + x4 + (256*x3)), xmask)
    tmp2 = tmp1 - tmp0
    tmp3 = x1
    tmp4 = tmp3.to(tl.float32)
    tmp5 = 0.5
    tmp6 = tmp4 + tmp5
    tmp7 = 1.0
    tmp8 = tmp6 * tmp7
    tmp9 = tmp8 - tmp5
    tmp10 = 0.0
    tmp11 = triton_helpers.maximum(tmp9, tmp10)
    tmp12 = tmp11.to(tl.int32)
    tmp13 = tmp12.to(tl.float32)
    tmp14 = tmp11 - tmp13
    tmp15 = triton_helpers.maximum(tmp14, tmp10)
    tmp16 = triton_helpers.minimum(tmp15, tmp7)
    tmp17 = tmp2 * tmp16
    tmp18 = tmp0 + tmp17
    tmp20 = tmp18 + tmp19
    tmp23 = tmp22 - tmp21
    tmp24 = tmp23 * tmp16
    tmp25 = tmp21 + tmp24
    tmp27 = tmp25 + tmp26
    tmp28 = tmp20 + tmp27
    tmp31 = tmp30 - tmp29
    tmp32 = tmp31 * tmp16
    tmp33 = tmp29 + tmp32
    tmp35 = tmp33 + tmp34
    tmp36 = tmp28 + tmp35
    tmp39 = tmp38 - tmp37
    tmp40 = tmp39 * tmp16
    tmp41 = tmp37 + tmp40
    tmp43 = tmp41 + tmp42
    tmp44 = tmp36 + tmp43
    tl.store(out_ptr0 + (x5), tmp44, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf6 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        buf7 = buf6; del buf6  # reuse
        buf8 = buf7; del buf7  # reuse
        buf9 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        buf10 = buf9; del buf9  # reuse
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        buf5 = buf0; del buf0  # reuse
        buf11 = buf5; del buf5  # reuse
        # Topologically Sorted Source Nodes: [resizing_layer], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
        stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf8, buf10, buf11, arg0_1, 1024, grid=grid(1024), stream=stream0) del arg0_1 buf12 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_tensor], Original ATen: [aten.sum] triton_poi_fused_sum_1.run(buf10, buf8, buf11, buf12, 256, grid=grid(256), stream=stream0) del buf10 del buf11 del buf8 return (buf12, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
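# --- Illustrative aside (a hedged sketch, not part of the generated record) ---
# The graph fragments above unroll trilinear resampling into repeated one-axis
# linear interpolation: gather at the floor index and at the clamped floor+1
# neighbour, then blend with a weight clamped to [0, 1]. A minimal eager sketch
# of that one-axis step; `lerp_along_axis` and its argument names are
# assumptions introduced for illustration, not identifiers from the record.
import torch

def lerp_along_axis(x, coord):
    # coord: fractional source positions for the last axis, assumed already
    # clamped non-negative, e.g. ((torch.arange(out_len) + 0.5) / scale - 0.5).clamp(min=0)
    lo = coord.floor().long()                           # floor index (the %convert_element_type nodes)
    hi = (lo + 1).clamp(max=x.size(-1) - 1)             # clamped neighbour (the %clamp_max nodes)
    w = (coord - lo.to(coord.dtype)).clamp(0.0, 1.0)    # blend weight (the %clamp_min/%clamp_max pairs)
    return x[..., lo] + (x[..., hi] - x[..., lo]) * w   # gather-and-blend (the %sub/%mul/%add chain)
# ------------------------------------------------------------------------------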
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
        in_out_ptr0, in_out_ptr1, in_out_ptr3, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16 % 4
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x3 = xindex // 64
    x6 = xindex
    tmp0 = x2
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = 1.0
    tmp5 = tmp3 * tmp4
    tmp6 = tmp5 - tmp2
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp8.to(tl.int32)
    tmp10 = x1
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp11 + tmp2
    tmp13 = tmp12 * tmp4
    tmp14 = tmp13 - tmp2
    tmp15 = triton_helpers.maximum(tmp14, tmp7)
    tmp16 = tmp15.to(tl.int32)
    tmp17 = tl.full([1], 1, tl.int64)
    tmp18 = tmp16 + tmp17
    tmp19 = tl.full([1], 3, tl.int64)
    tmp20 = triton_helpers.minimum(tmp18, tmp19)
    tmp21 = x0
    tmp22 = tmp21.to(tl.float32)
    tmp23 = tmp22 + tmp2
    tmp24 = tmp23 * tmp4
    tmp25 = tmp24 - tmp2
    tmp26 = triton_helpers.maximum(tmp25, tmp7)
    tmp27 = tmp26.to(tl.int32)
    tmp28 = tmp27 + tmp17
    tmp29 = triton_helpers.minimum(tmp28, tmp19)
    tmp30 = tl.load(in_ptr0 + (tmp29 + 4 * tmp20 + 16 * tmp9 + 64 * x3),
        xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr0 + (tmp27 + 4 * tmp20 + 16 * tmp9 + 64 * x3),
        xmask, eviction_policy='evict_last')
    tmp32 = tmp30 - tmp31
    tmp33 = tmp27.to(tl.float32)
    tmp34 = tmp26 - tmp33
    tmp35 = triton_helpers.maximum(tmp34, tmp7)
    tmp36 = triton_helpers.minimum(tmp35, tmp4)
    tmp37 = tmp32 * tmp36
    tmp38 = tmp31 + tmp37
    tmp39 = tl.load(in_ptr0 + (tmp29 + 4 * tmp16 + 16 * tmp9 + 64 * x3),
        xmask, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr0 + (tmp27 + 4 * tmp16 + 16 * tmp9 + 64 * x3),
        xmask, eviction_policy='evict_last')
    tmp41 = tmp39 - tmp40
    tmp42 = tmp41 * tmp36
    tmp43 = tmp40 + tmp42
    tmp44 = tmp9 + tmp17
    tmp45 = triton_helpers.minimum(tmp44, tmp19)
    tmp46 = tl.load(in_ptr0 + (tmp29 + 4 * tmp20 + 16 * tmp45 + 64 * x3),
        xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr0 + (tmp27 + 4 * tmp20 + 16 * tmp45 + 64 * x3),
        xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr0 + (tmp29 + 4 * tmp16 + 16 * tmp45 + 64 * x3),
        xmask, eviction_policy='evict_last')
    tmp49 = tl.load(in_ptr0 + (tmp27 + 4 * tmp16 + 16 * tmp45 + 64 * x3),
        xmask, eviction_policy='evict_last')
    tmp50 = tmp48 - tmp49
    tmp51 = tmp50 * tmp36
    tmp52 = tmp49 + tmp51
    tmp53 = tmp46 - tmp47
    tmp54 = tmp53 * tmp36
    tmp55 = tmp47 + tmp54
    tmp56 = tmp55 - tmp52
    tmp57 = tmp16.to(tl.float32)
    tmp58 = tmp15 - tmp57
    tmp59 = triton_helpers.maximum(tmp58, tmp7)
    tmp60 = triton_helpers.minimum(tmp59, tmp4)
    tmp61 = tmp56 * tmp60
    tmp62 = tmp52 + tmp61
    tmp63 = tmp38 - tmp43
    tmp64 = tmp63 * tmp60
    tmp65 = tmp43 + tmp64
    tmp66 = tmp62 - tmp65
    tmp67 = tmp9.to(tl.float32)
    tmp68 = tmp8 - tmp67
    tmp69 = triton_helpers.maximum(tmp68, tmp7)
    tmp70 = triton_helpers.minimum(tmp69, tmp4)
    tmp71 = tmp66 * tmp70
    tl.store(in_out_ptr0 + x6, tmp38, xmask)
    tl.store(in_out_ptr1 + x6, tmp43, xmask)
    tl.store(in_out_ptr3 + x6, tmp71, xmask)


@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex // 64
    x4 = xindex % 64
    x1 = xindex // 4 % 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (x4 + 256 * x3), xmask)
    tmp1 = tl.load(in_ptr1 + (x4 + 256 * x3), xmask)
    tmp19 = tl.load(in_ptr2 + (x4 + 256 * x3), xmask)
    tmp21 = tl.load(in_ptr0 + (64 + x4 + 256 * x3), xmask)
    tmp22 = tl.load(in_ptr1 + (64 + x4 + 256 * x3), xmask)
    tmp26 = tl.load(in_ptr2 + (64 + x4 + 256 * x3), xmask)
    tmp29 = tl.load(in_ptr0 + (128 + x4 + 256 * x3), xmask)
    tmp30 = tl.load(in_ptr1 + (128 + x4 + 256 * x3), xmask)
    tmp34 = tl.load(in_ptr2 + (128 + x4 + 256 * x3), xmask)
    tmp37 = tl.load(in_ptr0 + (192 + x4 + 256 * x3), xmask)
    tmp38 = tl.load(in_ptr1 + (192 + x4 + 256 * x3), xmask)
    tmp42 = tl.load(in_ptr2 + (192 + x4 + 256 * x3), xmask)
    tmp2 = tmp1 - tmp0
    tmp3 = x1
    tmp4 = tmp3.to(tl.float32)
    tmp5 = 0.5
    tmp6 = tmp4 + tmp5
    tmp7 = 1.0
    tmp8 = tmp6 * tmp7
    tmp9 = tmp8 - tmp5
    tmp10 = 0.0
    tmp11 = triton_helpers.maximum(tmp9, tmp10)
    tmp12 = tmp11.to(tl.int32)
    tmp13 = tmp12.to(tl.float32)
    tmp14 = tmp11 - tmp13
    tmp15 = triton_helpers.maximum(tmp14, tmp10)
    tmp16 = triton_helpers.minimum(tmp15, tmp7)
    tmp17 = tmp2 * tmp16
    tmp18 = tmp0 + tmp17
    tmp20 = tmp18 + tmp19
    tmp23 = tmp22 - tmp21
    tmp24 = tmp23 * tmp16
    tmp25 = tmp21 + tmp24
    tmp27 = tmp25 + tmp26
    tmp28 = tmp20 + tmp27
    tmp31 = tmp30 - tmp29
    tmp32 = tmp31 * tmp16
    tmp33 = tmp29 + tmp32
    tmp35 = tmp33 + tmp34
    tmp36 = tmp28 + tmp35
    tmp39 = tmp38 - tmp37
    tmp40 = tmp39 * tmp16
    tmp41 = tmp37 + tmp40
    tmp43 = tmp41 + tmp42
    tmp44 = tmp36 + tmp43
    tl.store(out_ptr0 + x5, tmp44, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf6 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        buf7 = buf6
        del buf6
        buf8 = buf7
        del buf7
        buf9 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        buf10 = buf9
        del buf9
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        buf5 = buf0
        del buf0
        buf11 = buf5
        del buf5
        get_raw_stream(0)
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[
            grid(1024)](buf8, buf10, buf11, arg0_1, 1024, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
        buf12 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_sum_1[grid(256)](buf10, buf8, buf11, buf12, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del buf10
        del buf11
        del buf8
    return buf12,


class LinearAdditiveUpsampleNew(nn.Module):
    """Bi/Trilinear Additive Upsample

    Upsampling strategy described in Wojna et al
    (https://doi.org/10.1007/s11263-019-01170-8) to avoid checkerboard
    patterns while keeping a better performance for the convolution.

    Parameters:
        scale_factor (int) -- the factor for the upsampling operation
        n_splits (float)   -- the channel reduction factor
        threed (bool)      -- 3D (true) or 2D (false) network
    """

    def __init__(self, scale_factor, n_splits, threed):
        super(LinearAdditiveUpsampleNew, self).__init__()
        self.scale_factor = scale_factor
        self.n_splits = n_splits
        if threed:
            self.mode = 'trilinear'
        else:
            self.mode = 'bilinear'

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
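# --- Illustrative aside (a hedged sketch, not part of the generated record) ---
# Eager-mode reference for what the two fused kernels appear to compute:
# interpolate with align_corners=False semantics, then sum groups of
# consecutive channels. `linear_additive_upsample_ref` and its defaults are
# assumptions inferred from the (4, 4, 4, 4, 4) input and the (4, 1, 4, 4, 4)
# output asserted in `call`, not code from the source repository.
import torch
import torch.nn.functional as F

def linear_additive_upsample_ref(x, scale_factor=1, n_splits=4, threed=True):
    mode = 'trilinear' if threed else 'bilinear'
    up = F.interpolate(x, scale_factor=scale_factor, mode=mode,
                       align_corners=False)
    n, c = up.shape[:2]
    assert c % n_splits == 0, 'channels must divide evenly into n_splits'
    # Group consecutive channels and collapse each group by summation.
    return up.view(n, c // n_splits, n_splits, *up.shape[2:]).sum(dim=2)

# Expected to agree with the compiled path up to rounding, e.g.:
#   x = torch.rand(4, 4, 4, 4, 4, device='cuda')
#   torch.testing.assert_close(LinearAdditiveUpsampleNew(1, 4, True)(x),
#                              linear_additive_upsample_ref(x))
# ------------------------------------------------------------------------------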
giuliabaldini/Pix2PixNIfTI
LinearAdditiveUpsample
false
3,544
[ "BSD-3-Clause" ]
0
59ff825760f682d2734bd5e95503a03f80d32414
https://github.com/giuliabaldini/Pix2PixNIfTI/tree/59ff825760f682d2734bd5e95503a03f80d32414
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/no/cnon5ajue75qf7yiwbkruc77mekmrelvorvagtbhdc4kdbzwzdin.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/a5/ca56rgpdjbilaspdaau44lnrilsxekhmcnbwzhtrcgfbilkik27p.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xl/cxls6dl5dz3ua4ilno7rjcfd6m7p4ydnd3mzfaq2cepnph6e2y7h.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = 
async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mf/cmfwwg65sbylkho5zzcgwfl3cw2tru7uwmgab3xaed7bzoxeyhoe.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xo/cxo7kebwccavmyl6qt2qy6dv6nxtxbcnmr6bte7ferqglr6pf5lx.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mean] # Source node to ATen node mapping: # multi_head_attention_forward => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_11, [1]), kwargs = {}) triton_poi_fused_mean_4 = async_compile.triton('triton_poi_fused_mean_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/a5/ca5u4qmkoeql76ogmh3jtan7rcyuzjntmkbi6vseh2aka4oj7x22.py # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add # src_1 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/ud/cudlmttxldaqwjebx3x6ystcji3chwr3lkxk64maxy4khpdp3nfn.py # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add # src_1 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_7), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_6), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, 
xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/2l/c2lx4cm43t5ldlcrg54wtrimftmxqc5oaodvjlhhxk26wwohsh7x.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_7 = async_compile.triton('triton_poi_fused_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/jr/cjrivzlgffmd4mdkydwzbkypvq43vw5lx7lx34d75oidkn5oukkt.py # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] # Source node to ATen node mapping: # src_2 => add_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {}) triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/w7/cw7rcffzrlzzq6c736du6qnb5ytwzbniibkuycklm4tfjn6me2ee.py # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src_3 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qq/cqqkte7r3xbnvps7wbt5vgxcwukikbvlwemcbrcnle7woo3t3r3c.py # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src_3 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_9), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_12), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_13), kwargs = {}) triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (2048, 4), (4, 1)) assert_size_stride(primals_9, (2048, ), (1, )) assert_size_stride(primals_10, (4, 2048), (2048, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_2 buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = buf4; 
del buf4 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0) del buf5 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0) buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_5 buf10 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mean] triton_poi_fused_mean_4.run(buf6, buf10, 16, grid=grid(16), stream=stream0) buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf12 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf9, buf11, buf12, 4, grid=grid(4), stream=stream0) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf9, buf11, buf12, primals_6, primals_7, buf13, 16, grid=grid(16), stream=stream0) del primals_7 buf14 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf13, reinterpret_tensor(primals_8, (4, 2048), (1, 4), 0), out=buf14) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_7.run(buf15, primals_9, 8192, grid=grid(8192), stream=stream0) del primals_9 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (2048, 4), (1, 2048), 0), out=buf16) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] triton_poi_fused_add_8.run(buf17, buf13, primals_11, 16, grid=grid(16), stream=stream0) del primals_11 buf18 = buf12; del buf12 # reuse buf19 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(buf17, buf18, buf19, 4, grid=grid(4), stream=stream0) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_10.run(buf17, buf18, buf19, primals_12, primals_13, buf20, 16, grid=grid(16), stream=stream0) del buf18 del buf19 del primals_13 return (buf20, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), primals_1, primals_6, primals_12, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf13, buf15, buf17, primals_10, primals_8, primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), 
reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
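# --- Illustrative aside (a hedged sketch, not part of the generated record) ---
# The two softmax kernels above split the numerically stable softmax into the
# passes spelled out by the graph fragments: amax/sub/exp, then sum/div. A
# short eager equivalent; `two_pass_softmax` is an illustrative name, not an
# identifier from the record.
import torch

def two_pass_softmax(scores):
    shifted = scores - scores.amax(dim=-1, keepdim=True)  # pass 1: subtract row max
    expd = shifted.exp()
    return expd / expd.sum(dim=-1, keepdim=True)          # pass 2: normalise by row sum

# For the (4, 4, 4) attention logits produced in `call`, this matches
# torch.softmax(scores, dim=-1) up to floating-point rounding.
# ------------------------------------------------------------------------------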
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
import torch.nn.functional as F
from torch.nn.modules import Module
from torch.nn.modules.activation import MultiheadAttention
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.linear import Linear
from torch.nn.modules.normalization import LayerNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_mean_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 2048
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (12, 4), (4, 1))
    assert_size_stride(primals_3, (12,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (2048, 4), (4, 1))
    assert_size_stride(primals_9, (2048,), (1,))
    assert_size_stride(primals_10, (4, 2048), (2048, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
            (1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
            alpha=1, beta=1, out=buf1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_2 buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf5 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf9) del primals_5 buf10 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mean_4[grid(16)](buf6, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf12 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(4)](primals_1, buf9, buf11, buf12, 4, XBLOCK=4, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf9, buf11, buf12, primals_6, primals_7, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf14 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf13, reinterpret_tensor(primals_8, (4, 2048), ( 1, 4), 0), out=buf14) buf15 = buf14 del buf14 triton_poi_fused_relu_7[grid(8192)](buf15, primals_9, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (2048, 4), (1, 2048), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_add_8[grid(16)](buf17, buf13, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 buf18 = buf12 del buf12 buf19 = buf11 del buf11 triton_poi_fused_native_layer_norm_9[grid(4)](buf17, buf18, buf19, 4, XBLOCK=4, num_warps=1, num_stages=1) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_10[grid(16)](buf17, buf18, buf19, primals_12, primals_13, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf18 del buf19 del primals_13 return (buf20, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), primals_1, primals_6, primals_12, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1 ), 0), buf9, buf13, buf15, buf17, primals_10, primals_8, primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)) def _get_activation_fn(activation): if activation == 'relu': return F.relu elif activation == 'gelu': return F.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class TransformerEncoderLayerNew(Module): """TransformerEncoderLayer is 
made up of self-attn and feedforward network. This standard encoder layer is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 6000-6010. Users may modify or implement in a different way during application. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of intermediate layer, relu or gelu (default=relu). """ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu'): super(TransformerEncoderLayerNew, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = Linear(d_model, dim_feedforward) self.dropout = Dropout(dropout) self.linear2 = Linear(dim_feedforward, d_model) self.norm1 = LayerNorm(d_model) self.norm2 = LayerNorm(d_model) self.dropout1 = Dropout(dropout) self.dropout2 = Dropout(dropout) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if 'activation' not in state: state['activation'] = F.relu super(TransformerEncoderLayerNew, self).__setstate__(state) def forward(self, input_0): primals_2 = self.self_attn.in_proj_weight primals_3 = self.self_attn.in_proj_bias primals_1 = self.self_attn.out_proj.weight primals_5 = self.self_attn.out_proj.bias primals_8 = self.linear1.weight primals_9 = self.linear1.bias primals_10 = self.linear2.weight primals_6 = self.linear2.bias primals_7 = self.norm1.weight primals_11 = self.norm1.bias primals_12 = self.norm2.weight primals_13 = self.norm2.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1]
Lingzhi-WANG/Quotation-Recommendation
TransformerEncoderLayer
false
17,595
[ "MIT" ]
4
40a875a41f10a597604206e067a16cbbfc88cdd7
https://github.com/Lingzhi-WANG/Quotation-Recommendation/tree/40a875a41f10a597604206e067a16cbbfc88cdd7
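A quick smoke test can help when validating this record. The following is a sketch, not part of the original entry; it assumes a CUDA device and that the TransformerEncoderLayerNew class defined above is in scope. The fused forward returns a pair (the layer output plus the head-averaged attention map), mirroring the two buffers call() hands back; note also that dropout never appears in the fused graph.

import torch

# Sketch: instantiate the compiled layer with the sizes the guards in call() expect.
layer = TransformerEncoderLayerNew(d_model=4, nhead=1).cuda().eval()  # eval() for parity; dropout is absent from the fused graph anyway
src = torch.rand(4, 4, device='cuda')   # call() asserts a contiguous (4, 4) input
out, attn = layer(src)                  # forward returns (output, averaged attention)
assert out.shape == (4, 4) and attn.shape == (4, 4)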
MaxPoolStride1
import torch import torch.nn as nn import torch.nn.functional as F class MaxPoolStride1(nn.Module): def __init__(self, kernel_size): super(MaxPoolStride1, self).__init__() self.kernel_size = kernel_size self.pad = kernel_size - 1 def forward(self, x): padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode='replicate') pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x) return pooled_x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp5 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * ( 1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * ( 1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * (1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp13 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1 ) * (1 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp21 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1 ) * (2 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = 
triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x4, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxPoolStride1New(nn.Module): def __init__(self, kernel_size): super(MaxPoolStride1New, self).__init__() self.kernel_size = kernel_size self.pad = kernel_size - 1 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
TCC-MonitoramentoInteligente/dev-tool
MaxPoolStride1
false
9,509
[ "MIT" ]
0
d3a1d697c4ba7a5fff54be08541da4fc4811ab5e
https://github.com/TCC-MonitoramentoInteligente/dev-tool/tree/d3a1d697c4ba7a5fff54be08541da4fc4811ab5e
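One detail worth flagging when reading this record: despite the class name, the original module passes self.pad (kernel_size - 1) as the stride of nn.MaxPool2d, and the fused kernel reproduces that behaviour exactly, so a 4x4 input shrinks to 2x2 rather than keeping its resolution. A minimal usage sketch, assuming a CUDA device:

import torch

pool = MaxPoolStride1New(kernel_size=4)      # pad = 3, and the pooling stride is also 3
x = torch.rand(4, 4, 4, 4, device='cuda')    # shape pinned by the guard in call()
y = pool(x)                                  # semantically: replicate-pad to 7x7, then max-pool k=4, s=3
assert y.shape == (4, 4, 2, 2)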
Generator
import torch from torch import nn def gumbel_softmax(logits, tau=1.0, hard=False, log_mode=True, dim=-1): while True: gumbels = -torch.empty_like(logits).exponential_().log() gumbels = (logits + gumbels) / tau if log_mode: y_soft = gumbels.log_softmax(dim) else: y_soft = gumbels.softmax(dim) if torch.sum(torch.isnan(y_soft)).item() < 0.01: break if hard: index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft else: ret = y_soft return ret class Generator(nn.Module): def __init__(self, vocab_size, dec_hidden_size, pad_idx): super(Generator, self).__init__() self.linear = nn.Linear(dec_hidden_size, vocab_size) self.softmax = nn.LogSoftmax(dim=-1) self.pad_idx = pad_idx def forward(self, x, use_gumbel_softmax=False): output = self.linear(x) output[:, self.pad_idx] = -float('inf') if use_gumbel_softmax: output = gumbel_softmax(output, log_mode=True, dim=-1) else: output = self.softmax(output) return output def get_inputs(): return [torch.rand([4, 5, 4, 4])] def get_init_inputs(): return [[], {'vocab_size': 4, 'dec_hidden_size': 4, 'pad_idx': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 5 x5 = xindex x6 = xindex // 4 tmp3 = tl.load(in_ptr0 + x5, xmask) tmp6 = tl.load(in_ptr0 + 4 * x6, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x6), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x6), xmask, eviction_policy='evict_last' ) tmp0 = x2 tmp1 = tl.full([1], 4, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = float('-inf') tmp5 = tl.where(tmp2, tmp4, tmp3) tmp7 = tl.where(tmp2, tmp4, tmp6) tmp9 = tl.where(tmp2, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tl.where(tmp2, tmp4, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp15 = tl.where(tmp2, tmp4, tmp14) tmp16 = triton_helpers.maximum(tmp13, tmp15) tmp17 = tmp5 - tmp16 tl.store(out_ptr0 + x5, tmp17, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((80, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(320)](buf0, buf1, 320, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 5, 4, 4), (80, 16, 4, 1), 0) del buf0 triton_poi_fused__log_softmax_1[grid(320)](buf1, buf2, 320, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), buf2 def gumbel_softmax(logits, tau=1.0, hard=False, log_mode=True, dim=-1): while True: gumbels = -torch.empty_like(logits).exponential_().log() 
gumbels = (logits + gumbels) / tau if log_mode: y_soft = gumbels.log_softmax(dim) else: y_soft = gumbels.softmax(dim) if torch.sum(torch.isnan(y_soft)).item() < 0.01: break if hard: index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft else: ret = y_soft return ret class GeneratorNew(nn.Module): def __init__(self, vocab_size, dec_hidden_size, pad_idx): super(GeneratorNew, self).__init__() self.linear = nn.Linear(dec_hidden_size, vocab_size) self.softmax = nn.LogSoftmax(dim=-1) self.pad_idx = pad_idx def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mosespv96/SCAPT-ABSA
Generator
false
16,108
[ "MIT" ]
49
6f7f89a131127f262a8d1fd2774e5a96b58e7193
https://github.com/mosespv96/SCAPT-ABSA/tree/6f7f89a131127f262a8d1fd2774e5a96b58e7193
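For this record only the deterministic branch is compiled: call() fuses the pad-row masking with the log-softmax, while gumbel_softmax remains plain Python and is never reached through GeneratorNew.forward. A usage sketch, assuming a CUDA device; since the slice at pad_idx is set to -inf along dim 1 before normalisation, that slice comes back as NaN, so the check below skips it.

import torch

gen = GeneratorNew(vocab_size=4, dec_hidden_size=4, pad_idx=4).cuda()
x = torch.rand(4, 5, 4, 4, device='cuda')
log_probs = gen(x)
probs = log_probs[:, :4].exp()               # skip the fully masked pad slice, which is NaN
assert torch.allclose(probs.sum(-1), torch.ones_like(probs.sum(-1)), atol=1e-05)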
PatchEmbed
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/nl/cnlokrj2wjyrgg7wfimnkgyoc67ges2kinndxwhgqm3b33ayddof.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 24576 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (64*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (4096*y1)), tmp0, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/nq/cnqioqtc5smqmnt22pzdujcgch6iuo4ayzdajy2hr5awqxgsqhdm.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (262144*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/l6/cl6ocqksyk3wleegoip6f6dl6yzvtddsatt22zjqsevei4dpu6kx.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [8, 8], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 64], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 
'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1536 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 384 y1 = (yindex // 384) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (384*x2) + (24576*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (64*y3)), tmp2, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (384, 64, 8, 8), (4096, 64, 8, 1)) assert_size_stride(primals_2, (384, ), (1, )) assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((384, 64, 8, 8), (4096, 1, 512, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 24576, 64, grid=grid(24576, 64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 256, 4096, grid=grid(256, 4096), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, buf0, stride=(8, 8), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 384, 8, 8), (24576, 1, 3072, 384)) buf3 = empty_strided_cuda((4, 384, 8, 8), (24576, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf2, primals_2, buf3, 1536, 64, grid=grid(1536, 64), stream=stream0) del buf2 del primals_2 return (buf3, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((384, 64, 8, 8), (4096, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 64 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 4096 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 1536 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 384 y1 = yindex // 384 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 384 * x2 + 24576 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (384, 64, 8, 8), (4096, 64, 8, 1)) assert_size_stride(primals_2, (384,), (1,)) assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((384, 64, 8, 8), (4096, 1, 512, 64), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(24576, 64)](primals_1, buf0, 24576, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_1[grid(256, 4096)](primals_3, buf1, 256, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, buf0, stride=(8, 8), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 384, 8, 8), (24576, 1, 3072, 384)) buf3 = empty_strided_cuda((4, 384, 8, 8), (24576, 64, 8, 1), torch. float32) triton_poi_fused_convolution_2[grid(1536, 64)](buf2, primals_2, buf3, 1536, 64, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf2 del primals_2 return buf3, buf0, buf1 class PatchEmbedNew(nn.Module): """ Image to Patch Embedding. 
Unlike ViT, which uses a single conv layer, we use 4 conv layers to do the patch embedding """ def __init__(self, img_size=224, stem_conv=False, stem_stride=1, patch_size=8, in_chans=3, hidden_dim=64, embed_dim=384): super().__init__() assert patch_size in [4, 8, 16] self.stem_conv = stem_conv if stem_conv: self.conv = nn.Sequential(nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride, padding=3, bias=False), nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), nn. Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(hidden_dim), nn.ReLU (inplace=True), nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), nn. BatchNorm2d(hidden_dim), nn.ReLU(inplace=True)) self.proj = nn.Conv2d(hidden_dim, embed_dim, kernel_size=patch_size // stem_stride, stride=patch_size // stem_stride) self.num_patches = img_size // patch_size * (img_size // patch_size) def forward(self, input_0): primals_1 = self.proj.weight primals_2 = self.proj.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
QLSong/cv-classify
PatchEmbed
false
2,749
[ "Apache-2.0" ]
0
02f53d03868f299a08b5c97a266b50a7fdcd3f2b
https://github.com/QLSong/cv-classify/tree/02f53d03868f299a08b5c97a266b50a7fdcd3f2b
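The compiled graph in this record covers only self.proj: the optional stem_conv stack is constructed in __init__ but never executed by call(), whose size guards also pin the input to a contiguous (4, 64, 64, 64) tensor; note the channel count must match hidden_dim, not in_chans. A sketch under those assumptions (CUDA device, the defaults hidden_dim=64, patch_size=8, embed_dim=384):

import torch

embed = PatchEmbedNew(img_size=64).cuda()    # proj is Conv2d(64, 384, kernel 8, stride 8)
x = torch.rand(4, 64, 64, 64, device='cuda')
y = embed(x)
assert y.shape == (4, 384, 8, 8)
assert embed.num_patches == 64               # (64 // 8) ** 2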
ToContinuous
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/3b/c3b62pdsbck73tub674e2ap2qhafl7nnia7qegg6orm2txneplfz.py # Topologically Sorted Source Nodes: [max_1, float_2, sub, x_2, mul, x_3, setitem], Original ATen: [aten.max, aten._to_copy, aten.sub, aten.div, aten.mul, aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # float_2 => convert_element_type_1 # max_1 => max_1 # mul => mul # setitem => full_default, index_put # sub => sub # x_2 => div # x_3 => sub_1 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%getitem_1, torch.float32), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convert_element_type_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%sub_1, [%bitwise_not], %full_default), kwargs = {}) triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tmp46 > tmp10 tmp48 = tmp47 == 0 tmp49 = tmp46.to(tl.float32) tmp50 = 1.0 tmp51 = tmp49 - tmp50 tmp52 = 0.5 tmp53 = tmp51 * tmp52 tmp54 = 2.0 tmp55 = tmp53 * tmp54 tmp56 = tmp55 - tmp50 tmp57 = -1.0 tmp58 = tl.where(tmp48, tmp57, tmp56) tl.store(out_ptr0 + (x2), tmp46, xmask) tl.store(out_ptr1 + (x2), tmp58, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/j5/cj5xlqb2izp3v4stsdw4qkcghuulalwjuyl2kw4xl2gdfve2bely.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_4 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints 
import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 2 x2 = (xindex // 32) x0 = xindex % 16 x3 = xindex tmp0 = x2 + (4*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*(x2 + (4*x1)))), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x2 + (4*x1)))), tmp6 & xmask, other=0.0) tmp10 = tmp9 > tmp1 tmp11 = tmp10.to(tl.float32) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + (x3), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [max_1, float_2, sub, x_2, mul, x_3, setitem], Original ATen: [aten.max, aten._to_copy, aten.sub, aten.div, aten.mul, aten.lift_fresh, aten.index_put] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0.run(arg0_1, buf0, buf1, 64, grid=grid(64), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, buf0, buf2, 128, grid=grid(128), stream=stream0) del buf0 del buf1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tmp46 > tmp10 tmp48 = tmp47 == 0 tmp49 = tmp46.to(tl.float32) tmp50 = 1.0 tmp51 = tmp49 - tmp50 tmp52 = 0.5 tmp53 = tmp51 * tmp52 tmp54 = 2.0 tmp55 = tmp53 * tmp54 tmp56 = tmp55 - tmp50 tmp57 = -1.0 tmp58 = tl.where(tmp48, tmp57, tmp56) tl.store(out_ptr0 + x2, tmp46, xmask) tl.store(out_ptr1 + x2, tmp58, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x2 = xindex // 32 x0 = xindex % 16 x3 = xindex tmp0 = x2 + 4 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * (x2 + 4 * x1)), tmp4 & xmask, other=0.0 ) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x2 + 4 * x1)), tmp6 & xmask, other=0.0) tmp10 = tmp9 > tmp1 tmp11 = tmp10.to(tl.float32) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0[grid (64)](arg0_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf2 = 
empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(128)](buf1, buf0, buf2, 128, XBLOCK= 128, num_warps=4, num_stages=1) del buf0 del buf1 return buf2, class ToContinuousNew(nn.Module): def __init__(self): super(ToContinuousNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
kampta/multiview-shapes
ToContinuous
false
3,796
[ "MIT" ]
0
a79eb4b492be8c2c279e2c69b13d5a19dff1621b
https://github.com/kampta/multiview-shapes/tree/a79eb4b492be8c2c279e2c69b13d5a19dff1621b
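Reading the two kernels in this record: the first takes an argmax over the channel dimension and rescales the winning index as (idx - 1) / 2 * 2 - 1, forcing index 0 to -1; the second stacks that continuous map with a 0/1 foreground mask (argmax > 0). A usage sketch, assuming a CUDA device:

import torch

to_cont = ToContinuousNew()
scores = torch.rand(4, 4, 4, 4, device='cuda')   # (N, C, H, W) per-class scores
out = to_cont(scores)
assert out.shape == (4, 2, 4, 4)                 # ch 0: value in [-1, 1]; ch 1: mask
assert ((out[:, 1] == 0) | (out[:, 1] == 1)).all()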
UpConv
import torch import torch.nn as nn class UpConv(nn.Module): def __init__(self, input_nc, output_nc, kernel_size): super(UpConv, self).__init__() self.deconv = nn.ConvTranspose2d(in_channels=input_nc, out_channels =output_nc, kernel_size=2, bias=True, stride=2, padding=0) self.activation_fn = nn.ELU() def forward(self, input): return self.activation_fn(self.deconv(input)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_nc': 4, 'output_nc': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(1024)](buf1, primals_2, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, buf1 class UpConvNew(nn.Module): def __init__(self, input_nc, output_nc, kernel_size): super(UpConvNew, self).__init__() self.deconv = nn.ConvTranspose2d(in_channels=input_nc, out_channels =output_nc, kernel_size=2, bias=True, stride=2, padding=0) self.activation_fn = nn.ELU() def forward(self, input_0): primals_1 = self.deconv.weight primals_2 = self.deconv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dong1015323606/LKVOLearner
UpConv
false
15,205
[ "BSD-3-Clause" ]
237
6ac9fb5d3c22d6a81529063f8c52d6aa34166b2a
https://github.com/dong1015323606/LKVOLearner/tree/6ac9fb5d3c22d6a81529063f8c52d6aa34166b2a
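A quirk worth flagging in this record: the kernel_size argument is accepted but ignored (the transposed convolution is hard-coded to a 2x2 kernel with stride 2), so the layer always doubles the spatial resolution, with the bias add and ELU fused into a single Triton kernel after the extern deconvolution. A sketch, assuming a CUDA device:

import torch

up = UpConvNew(input_nc=4, output_nc=4, kernel_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = up(x)
assert y.shape == (4, 4, 8, 8)                   # stride-2 deconv doubles H and W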
BertGELU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/6s/c6shmuvjmq6zc4ifvdsynorwri47ra63qxa7jg3e7p6lw6xlqj5q.py # Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] # Source node to ATen node mapping: # add => add # erf => erf # mul => mul # mul_1 => mul_1 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 1.4142135623730951), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_add_div_erf_mul_0 = async_compile.triton('triton_poi_fused_add_div_erf_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class BertGELUNew(nn.Module): """Bert uses GELU as the activation function for the position-wise network. """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Codle/texar-pytorch
BertGELU
false
11,298
[ "Apache-2.0" ]
0
d63556e7a8f48076c396467314a771d56552d595
https://github.com/Codle/texar-pytorch/tree/d63556e7a8f48076c396467314a771d56552d595
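The fused kernel in the BertGELU record computes x * 0.5 * (1 + erf(x * 0.7071067811865475)), i.e. the exact erf-based GELU. A quick eager-mode sanity check against stock PyTorch, whose F.gelu defaults to the same erf formulation:

import torch

x = torch.randn(4, 4, 4, 4)
manual = x * 0.5 * (1.0 + torch.erf(x / 2 ** 0.5))
# F.gelu's default (approximate='none') is the exact erf form, so these agree.
assert torch.allclose(manual, torch.nn.functional.gelu(x), atol=1e-6)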
_MLP_C
import torch import torch.nn as nn class _MLP_C(nn.Module): """MLP that use DPMs from fcn and age, gender and MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_C, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def forward(self, X1, X2): X = torch.cat((X1, X2), 1) out = self.do1(X) out = self.fc1(out) out = self.ac1(out) out = self.do2(out) out = self.fc2(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_size': 4, 'drop_rate': 0.5, 'fil_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (2, 4), (4, 1)) assert_size_stride(primals_6, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(512)](buf1, primals_4, buf2, buf3, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_4 buf4 = empty_strided_cuda((128, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return reinterpret_tensor(buf4, (4, 8, 4, 2), (64, 8, 2, 1), 0 ), reinterpret_tensor(buf0, (128, 4), (4, 1), 0 ), buf2, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), primals_5 class _MLP_CNew(nn.Module): """MLP that use DPMs from fcn and age, gender and MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_CNew, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def 
forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
GaelKBertrand/Meliora_DeepLearning
_MLP_C
false
5,185
[ "MIT" ]
1
5618e01066d4d0afcd7dfe074dda91af22b5857c
https://github.com/GaelKBertrand/Meliora_DeepLearning/tree/5618e01066d4d0afcd7dfe074dda91af22b5857c
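In the _MLP_C record, torch.cat((X1, X2), 1) joins the two [4, 4, 4, 4] test inputs into [4, 8, 4, 4], while nn.Linear(in_size, fil_num) acts on the last dimension; in_size therefore matches the trailing dim (4), not the concatenated channel dim (8). A shape check under the record's own test configuration:

import torch
import torch.nn as nn

x = torch.cat((torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)), 1)
out = nn.Linear(4, 2)(nn.LeakyReLU()(nn.Linear(4, 4)(x)))
# Matches the (4, 8, 4, 2) output the optimised call() reinterprets buf4 into.
print(x.shape, out.shape)  # torch.Size([4, 8, 4, 4]) torch.Size([4, 8, 4, 2])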
L2Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/dw/cdwecwssm3wtmxsvs2kudgljb4a2t55bg5xziiwfbr2wlehdvnmj.py # Topologically Sorted Source Nodes: [x, out], Original ATen: [aten.div, aten.mul] # Source node to ATen node mapping: # out => mul # x => div # Graph fragment: # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_1, %div), kwargs = {}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %div), kwargs = {}) triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr2'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp16 * tmp15 tl.store(out_ptr0 + (x3), tmp15, xmask) tl.store(out_ptr1 + (x3), tmp17, xmask) tl.store(out_ptr2 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, out], Original ATen: [aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_0.run(primals_1, primals_2, buf0, buf1, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from math import sqrt as sqrt from itertools import product as product import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp16 * tmp15 tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp17, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0, buf1, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf1, buf0 class L2NormNew(nn.Module): def __init__(self, n_channels, scale): super(L2NormNew, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant(self.weight, self.gamma) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
ashwath007/amenity-detection
L2Norm
false
6,263
[ "Apache-2.0" ]
1
acb885eb4d791acc6e65237445a4fc6830e4d30c
https://github.com/ashwath007/amenity-detection/tree/acb885eb4d791acc6e65237445a4fc6830e4d30c
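One caveat in the L2Norm record: reset_parameters calls init.constant, the long-deprecated alias of the in-place init.constant_, which newer PyTorch versions may no longer ship; depending on the install, the module code (kept verbatim above) emits a deprecation warning or fails outright. A hedged sketch of the modern equivalent; the 20.0 scale is illustrative, not taken from the record:

import torch
import torch.nn as nn
import torch.nn.init as init

weight = nn.Parameter(torch.Tensor(4))
init.constant_(weight, 20.0)  # in-place successor to the deprecated init.constant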
Sigmoid
import torch import torch.nn as nn class ActivationFunction(nn.Module): def __init__(self): super().__init__() self.name = self.__class__.__name__ self.config = {'name': self.name} class Sigmoid(ActivationFunction): def forward(self, x): return 1 / (1 + torch.exp(-x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_neg_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -tmp0 tmp2 = tl_math.exp(tmp1) tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 1, tl.int32) tmp6 = tmp5 / tmp4 tmp7 = tmp6 * tmp3 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_neg_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ActivationFunction(nn.Module): def __init__(self): super().__init__() self.name = self.__class__.__name__ self.config = {'name': self.name} class SigmoidNew(ActivationFunction): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ashutoshml/lightning-tutorials
Sigmoid
false
6,243
[ "Apache-2.0" ]
1
898b8b6f9852c0b80f034a3187bc1cd34dd521ce
https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce
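The Sigmoid record's hand-written 1 / (1 + torch.exp(-x)) agrees with torch.sigmoid for well-scaled inputs, though the built-in avoids the intermediate inf that exp(-x) produces for large-magnitude negative x (which, under autograd, turns the manual formula's gradient into NaN). A quick agreement check:

import torch

x = torch.randn(4, 4, 4, 4)
assert torch.allclose(1 / (1 + torch.exp(-x)), torch.sigmoid(x), atol=1e-6)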
ModulatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ri/criuvsdl3sferb4bb6ci5zaps3wys7xxcpybz7vfo2ba4q7cuq6c.py # Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # demod => rsqrt # mul_2 => mul_2 # pow_1 => pow_1 # sum_1 => sum_1 # weight => mul_3 # weight_1 => mul_4 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.125), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %view_1), kwargs = {}) triton_per_fused_add_mul_pow_rsqrt_sum_2 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = (rindex // 16) x1 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (r5 + (64*x4)), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 
buf3 = buf0; del buf0 # reuse buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_2.run(buf4, primals_5, buf2, buf5, 16, 64, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return (reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0), primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 
0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size 
ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_5 = self.weight primals_2 = self.modulation.weight primals_3 = self.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return 
output[0]
AsianZeus/Diverse-Facial-Edit
ModulatedConv2d
false
9,415
[ "Apache-2.0" ]
0
3d4b1b41546a08a1fa3cb164ade33e319806b12b
https://github.com/AsianZeus/Diverse-Facial-Edit/tree/3d4b1b41546a08a1fa3cb164ade33e319806b12b
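The triton_per_fused_add_mul_pow_rsqrt_sum_2 kernel in the ModulatedConv2d record is StyleGAN2-style weight demodulation. A plain-PyTorch sketch of the same arithmetic under the record's configuration, where scale = 1/sqrt(in_ch * k * k) = 1/sqrt(64) = 0.125 and eps = 1e-08 match the kernel's constants:

import torch

batch, out_ch, in_ch, k = 4, 4, 4, 4
w = torch.randn(1, out_ch, in_ch, k, k)    # self.weight
style = torch.randn(batch, in_ch)          # output of the modulation EqualLinear
weight = 0.125 * w * style.view(batch, 1, in_ch, 1, 1)    # modulate
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, out_ch, 1, 1, 1)      # demodulate
# Flattened to (batch*out_ch, in_ch, k, k) and run as a grouped conv,
# matching the extern_kernels.convolution call with groups=4 above.
print(weight.view(batch * out_ch, in_ch, k, k).shape)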
PropMaxPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/o3/co3fvzuaybjp4ocvx5bnaukyhlezccwzraqdsx6elnoggrfynhhv.py # Topologically Sorted Source Nodes: [map_h], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # map_h => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/lg/clgxgb5ifxkfz47ikkzqlj6pgovt74rn2ejb3t7kvtlwnebdzz74.py # Topologically Sorted Source Nodes: [map_h, setitem], Original ATen: [aten.new_zeros, aten.index_put] # Source node to ATen node mapping: # map_h => full_default # setitem => index_put # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%full_default, [None, None, %lift_fresh_copy, %lift_fresh_copy_1], %arg0_1), kwargs = {}) triton_poi_fused_index_put_new_zeros_1 = async_compile.triton('triton_poi_fused_index_put_new_zeros_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_new_zeros_1', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_put_new_zeros_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp11 = tl.load(in_ptr0 + (x2), xmask) tmp0 = x0 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 0, tl.int64) tmp6 = tl.where(tmp4, tmp5, tmp3) tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.where(tmp8, tmp1, tmp7) tmp10 = tl.where(tmp2, tmp6, tmp9) tl.store(out_ptr0 + ((5*tmp10) + (16*x1)), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/3m/c3m5zntshenarfio7ilpf5k726vzzoayxjev4sjdillw6khpkhfk.py # Topologically Sorted Source Nodes: [x, setitem_2], Original ATen: [aten.max_pool2d_with_indices, aten.index_put] # Source node to ATen node mapping: # setitem_2 => index_put_2 # x => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 2], [1, 1], [0, 0], [1, 1], False), kwargs = {}) # %index_put_2 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [None, 
None, %lift_fresh_copy_6, %lift_fresh_copy_7], %squeeze), kwargs = {})
triton_poi_fused_index_put_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_index_put_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_max_pool2d_with_indices_2', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = (xindex // 3)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
    tmp1 = tl.load(in_ptr0 + (1 + x0 + (4*x1)), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = x0
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp3 < tmp4
    tmp6 = tl.full([1], 2, tl.int64)
    tmp7 = tmp3 < tmp6
    tmp8 = tl.where(tmp7, tmp4, tmp6)
    tmp9 = tl.full([1], 0, tl.int64)
    tmp10 = tl.where(tmp5, tmp9, tmp8)
    tmp11 = tl.full([1], 3, tl.int64)
    tmp12 = tl.where(tmp7, tmp6, tmp11)
    tmp13 = tl.where(tmp5, tmp4, tmp12)
    tl.store(out_ptr0 + (x2), tmp2, xmask)
    tl.store(out_ptr1 + (tmp13 + (4*tmp10) + (16*x1)), tmp2, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/rx/crxe2gmw4ppbxxq3kvcirqaw5tb4kagz5epjsaw3i5xywjm6fnxp.py
# Topologically Sorted Source Nodes: [x_1, setitem_4], Original ATen: [aten.max_pool2d_with_indices, aten.index_put]
# Source node to ATen node mapping:
#   setitem_4 => index_put_4
#   x_1 => _low_memory_max_pool2d_with_offsets_1
# Graph fragment:
#   %_low_memory_max_pool2d_with_offsets_1 : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze_1, [1, 2], [1, 1], [0, 0], [1, 1], False), kwargs = {})
#   %index_put_4 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_2, [None, None, %lift_fresh_copy_12, %lift_fresh_copy_13], %squeeze_2), kwargs = {})
triton_poi_fused_index_put_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_index_put_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_max_pool2d_with_indices_3', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = (xindex // 2)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (3*x1)), xmask)
    tmp1 = tl.load(in_ptr0 + (1 + x0 + (3*x1)), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = x0
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp3 < tmp4
    tmp6 = tl.full([1], 0, tl.int64)
    tmp7 = tl.where(tmp5, tmp6, tmp4)
    tmp8 = tl.full([1], 2, tl.int64)
    tmp9 = tl.full([1], 3, tl.int64)
    tmp10 = tl.where(tmp5, tmp8, tmp9)
    tl.store(out_ptr0 + (x2), tmp2, xmask)
    tl.store(out_ptr1 + (tmp10 + (4*tmp7) + (16*x1)), tmp2, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/t3/ct3jy6bo7isikipngetj4c7rkfh6o7gaemb3wzt2usaj3ukrkpmn.py
# Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.index_put]
# Source node to ATen node mapping:
#   setitem_6 => index_put_6
# Graph fragment:
#   %index_put_6 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_4, [None, None, %full_default_2, %full_default_3], %squeeze_4), kwargs = {})
triton_poi_fused_index_put_4 = async_compile.triton('triton_poi_fused_index_put_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_4', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + (3 + (16*x0)), tmp2, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/rk/crk2aqpub5cu4vqgxe7cgebyr75yv52xg2z4s5qtybyjz5gcgb4o.py
# Topologically Sorted Source Nodes: [map_mask], Original ATen: [aten.new_zeros]
# Source node to ATen node mapping:
#   map_mask => full_default_1
# Graph fragment:
#   %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_new_zeros_5 = async_compile.triton('triton_poi_fused_new_zeros_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_new_zeros_5(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/kq/ckqkdi7yzrq5y2u3jt6hddlvw44kdgifjvpb6zy7gn3uqjtw77rv.py
# Topologically Sorted Source Nodes: [map_mask, getitem, iadd, setitem_1], Original ATen: [aten.new_zeros, aten.index, aten.add, aten.index_put]
# Source node to ATen node mapping:
#   getitem => index
#   iadd => add
#   map_mask => full_default_1
#   setitem_1 => index_put_1
# Graph fragment:
#   %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%full_default_1, [None, None, %lift_fresh_copy_2, %lift_fresh_copy_3]), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%index, 1), kwargs = {})
#   %index_put_1 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%full_default_1, [None, None, %lift_fresh_copy_4, %lift_fresh_copy_5], %add), kwargs = {})
triton_poi_fused_add_index_index_put_new_zeros_6 = async_compile.triton('triton_poi_fused_add_index_index_put_new_zeros_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_index_put_new_zeros_6', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_index_put_new_zeros_6(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    tmp0 = x0
    tmp1 = tl.full([1], 2, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 0, tl.int64)
    tmp6 = tl.where(tmp4, tmp5, tmp3)
    tmp7 = tl.full([1], 3, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.where(tmp8, tmp1, tmp7)
    tmp10 = tl.where(tmp2, tmp6, tmp9)
    tmp11 = 1.0
    tl.store(out_ptr0 + ((5*tmp10) + (16*x1)), tmp11, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/yh/cyh4d473zu5fq3zvya4d54k32mgtgjmqfi434h7w34pu27wva22o.py
# Topologically Sorted Source Nodes: [getitem_1, iadd_1, setitem_3], Original ATen: [aten.index, aten.add, aten.index_put]
# Source node to ATen node mapping:
#   getitem_1 => index_1
#   iadd_1 => add_1
#   setitem_3 => index_put_3
# Graph fragment:
#   %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%index_put_1, [None, None, %lift_fresh_copy_8, %lift_fresh_copy_9]), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%index_1, 1), kwargs = {})
#   %index_put_3 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_1, [None, None, %lift_fresh_copy_10, %lift_fresh_copy_11], %add_1), kwargs = {})
triton_poi_fused_add_index_index_put_7 = async_compile.triton('triton_poi_fused_add_index_index_put_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_index_put_7', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_index_put_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 12
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = (xindex // 3)
    tmp0 = x0
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 2, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.where(tmp4, tmp1, tmp3)
    tmp6 = tl.full([1], 0, tl.int64)
    tmp7 = tl.where(tmp2, tmp6, tmp5)
    tmp8 = tl.full([1], 3, tl.int64)
    tmp9 = tl.where(tmp4, tmp3, tmp8)
    tmp10 = tl.where(tmp2, tmp1, tmp9)
    tmp11 = tl.load(in_ptr0 + (tmp10 + (4*tmp7) + (16*x1)), xmask, eviction_policy='evict_last')
    tmp12 = 1.0
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + (tmp10 + (4*tmp7) + (16*x1)), tmp13, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/rx/crxdgu3cbwdxalwp2y4qfsqcgo77ly6gwe6f4f6cgp6tluldiivu.py
# Topologically Sorted Source Nodes: [getitem_2, iadd_2, setitem_5], Original ATen: [aten.index, aten.add, aten.index_put]
# Source node to ATen node mapping:
#   getitem_2 => index_2
#   iadd_2 => add_2
#   setitem_5 => index_put_5
# Graph fragment:
#   %index_2 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%index_put_3, [None, None, %lift_fresh_copy_14, %lift_fresh_copy_15]), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%index_2, 1), kwargs = {})
#   %index_put_5 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_3, [None, None, %lift_fresh_copy_16, %lift_fresh_copy_17], %add_2), kwargs = {})
triton_poi_fused_add_index_index_put_8 = async_compile.triton('triton_poi_fused_add_index_index_put_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_index_put_8', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_index_put_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = (xindex // 2)
    tmp0 = x0
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 0, tl.int64)
    tmp4 = tl.where(tmp2, tmp3, tmp1)
    tmp5 = tl.full([1], 2, tl.int64)
    tmp6 = tl.full([1], 3, tl.int64)
    tmp7 = tl.where(tmp2, tmp5, tmp6)
    tmp8 = tl.load(in_ptr0 + (tmp7 + (4*tmp4) + (16*x1)), xmask, eviction_policy='evict_last')
    tmp9 = 1.0
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + (tmp7 + (4*tmp4) + (16*x1)), tmp10, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/cv/ccvvzhsqsa5cdebqqmpw52jbb6o2hpjsqzkzzl2zgw37x56bdno7.py
# Topologically Sorted Source Nodes: [getitem_3, iadd_3, setitem_7], Original ATen: [aten.index, aten.add, aten.index_put]
# Source node to ATen node mapping:
#   getitem_3 => index_3
#   iadd_3 => add_3
#   setitem_7 => index_put_7
# Graph fragment:
#   %index_3 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%index_put_5, [None, None, %full_default_4, %full_default_5]), kwargs = {})
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%index_3, 1), kwargs = {})
#   %index_put_7 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_5, [None, None, %full_default_6, %full_default_7], %add_3), kwargs = {})
triton_poi_fused_add_index_index_put_9 = async_compile.triton('triton_poi_fused_add_index_index_put_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_index_put_9', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_index_put_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (3 + (16*x0)), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [map_h], Original ATen: [aten.new_zeros]
        stream0 = get_raw_stream(0)
        triton_poi_fused_new_zeros_0.run(buf2, 256, grid=grid(256), stream=stream0)
        # Topologically Sorted Source Nodes: [map_h, setitem], Original ATen: [aten.new_zeros, aten.index_put]
        triton_poi_fused_index_put_new_zeros_1.run(arg0_1, buf2, 64, grid=grid(64), stream=stream0)
        buf0 = empty_strided_cuda((4, 4, 1, 3), (12, 3, 48, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x, setitem_2], Original ATen: [aten.max_pool2d_with_indices, aten.index_put]
        triton_poi_fused_index_put_max_pool2d_with_indices_2.run(arg0_1, buf0, buf2, 48, grid=grid(48), stream=stream0)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 32, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_1, setitem_4], Original ATen: [aten.max_pool2d_with_indices, aten.index_put]
        triton_poi_fused_index_put_max_pool2d_with_indices_3.run(buf0, buf1, buf2, 32, grid=grid(32), stream=stream0)
        del buf0
        # Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.index_put]
        triton_poi_fused_index_put_4.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
        del buf1
        buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [map_mask], Original ATen: [aten.new_zeros]
        triton_poi_fused_new_zeros_5.run(buf7, 64, grid=grid(64), stream=stream0)
        # Topologically Sorted Source Nodes: [map_mask, getitem, iadd, setitem_1], Original ATen: [aten.new_zeros, aten.index, aten.add, aten.index_put]
        triton_poi_fused_add_index_index_put_new_zeros_6.run(buf7, 16, grid=grid(16), stream=stream0)
        # Topologically Sorted Source Nodes: [getitem_1, iadd_1, setitem_3], Original ATen: [aten.index, aten.add, aten.index_put]
        triton_poi_fused_add_index_index_put_7.run(buf7, buf7, 12, grid=grid(12), stream=stream0)
        # Topologically Sorted Source Nodes: [getitem_2, iadd_2, setitem_5], Original ATen: [aten.index, aten.add, aten.index_put]
        triton_poi_fused_add_index_index_put_8.run(buf7, buf7, 8, grid=grid(8), stream=stream0)
        # Topologically Sorted Source Nodes: [getitem_3, iadd_3, setitem_7], Original ATen: [aten.index, aten.add, aten.index_put]
        triton_poi_fused_add_index_index_put_9.run(buf7, buf7, 4, grid=grid(4), stream=stream0)
    return (buf2, buf7, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_index_put_new_zeros_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp11 = tl.load(in_ptr0 + x2, xmask)
    tmp0 = x0
    tmp1 = tl.full([1], 2, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 0, tl.int64)
    tmp6 = tl.where(tmp4, tmp5, tmp3)
    tmp7 = tl.full([1], 3, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.where(tmp8, tmp1, tmp7)
    tmp10 = tl.where(tmp2, tmp6, tmp9)
    tl.store(out_ptr0 + (5 * tmp10 + 16 * x1), tmp11, xmask)


@triton.jit
def triton_poi_fused_index_put_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = xindex // 3
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (1 + x0 + 4 * x1), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = x0
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp3 < tmp4
    tmp6 = tl.full([1], 2, tl.int64)
    tmp7 = tmp3 < tmp6
    tmp8 = tl.where(tmp7, tmp4, tmp6)
    tmp9 = tl.full([1], 0, tl.int64)
    tmp10 = tl.where(tmp5, tmp9, tmp8)
    tmp11 = tl.full([1], 3, tl.int64)
    tmp12 = tl.where(tmp7, tmp6, tmp11)
    tmp13 = tl.where(tmp5, tmp4, tmp12)
    tl.store(out_ptr0 + x2, tmp2, xmask)
    tl.store(out_ptr1 + (tmp13 + 4 * tmp10 + 16 * x1), tmp2, xmask)


@triton.jit
def triton_poi_fused_index_put_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 3 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (1 + x0 + 3 * x1), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = x0
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp3 < tmp4
    tmp6 = tl.full([1], 0, tl.int64)
    tmp7 = tl.where(tmp5, tmp6, tmp4)
    tmp8 = tl.full([1], 2, tl.int64)
    tmp9 = tl.full([1], 3, tl.int64)
    tmp10 = tl.where(tmp5, tmp8, tmp9)
    tl.store(out_ptr0 + x2, tmp2, xmask)
    tl.store(out_ptr1 + (tmp10 + 4 * tmp7 + 16 * x1), tmp2, xmask)


@triton.jit
def triton_poi_fused_index_put_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + (3 + 16 * x0), tmp2, xmask)


@triton.jit
def triton_poi_fused_new_zeros_5(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_index_index_put_new_zeros_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = x0
    tmp1 = tl.full([1], 2, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 0, tl.int64)
    tmp6 = tl.where(tmp4, tmp5, tmp3)
    tmp7 = tl.full([1], 3, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.where(tmp8, tmp1, tmp7)
    tmp10 = tl.where(tmp2, tmp6, tmp9)
    tmp11 = 1.0
    tl.store(out_ptr0 + (5 * tmp10 + 16 * x1), tmp11, xmask)


@triton.jit
def triton_poi_fused_add_index_index_put_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 12
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = xindex // 3
    tmp0 = x0
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 2, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.where(tmp4, tmp1, tmp3)
    tmp6 = tl.full([1], 0, tl.int64)
    tmp7 = tl.where(tmp2, tmp6, tmp5)
    tmp8 = tl.full([1], 3, tl.int64)
    tmp9 = tl.where(tmp4, tmp3, tmp8)
    tmp10 = tl.where(tmp2, tmp1, tmp9)
    tmp11 = tl.load(in_ptr0 + (tmp10 + 4 * tmp7 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp12 = 1.0
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + (tmp10 + 4 * tmp7 + 16 * x1), tmp13, xmask)


@triton.jit
def triton_poi_fused_add_index_index_put_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    tmp0 = x0
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 0, tl.int64)
    tmp4 = tl.where(tmp2, tmp3, tmp1)
    tmp5 = tl.full([1], 2, tl.int64)
    tmp6 = tl.full([1], 3, tl.int64)
    tmp7 = tl.where(tmp2, tmp5, tmp6)
    tmp8 = tl.load(in_ptr0 + (tmp7 + 4 * tmp4 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp9 = 1.0
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + (tmp7 + 4 * tmp4 + 16 * x1), tmp10, xmask)


@triton.jit
def triton_poi_fused_add_index_index_put_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (3 + 16 * x0), tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_new_zeros_0[grid(256)](buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        triton_poi_fused_index_put_new_zeros_1[grid(64)](arg0_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf0 = empty_strided_cuda((4, 4, 1, 3), (12, 3, 48, 1), torch.float32)
        triton_poi_fused_index_put_max_pool2d_with_indices_2[grid(48)](arg0_1, buf0, buf2, 48, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 32, 1), torch.float32)
        triton_poi_fused_index_put_max_pool2d_with_indices_3[grid(32)](buf0, buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del buf0
        triton_poi_fused_index_put_4[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf1
        buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        triton_poi_fused_new_zeros_5[grid(64)](buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
        triton_poi_fused_add_index_index_put_new_zeros_6[grid(16)](buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
        triton_poi_fused_add_index_index_put_7[grid(12)](buf7, buf7, 12, XBLOCK=16, num_warps=1, num_stages=1)
        triton_poi_fused_add_index_index_put_8[grid(8)](buf7, buf7, 8, XBLOCK=8, num_warps=1, num_stages=1)
        triton_poi_fused_add_index_index_put_9[grid(4)](buf7, buf7, 4, XBLOCK=4, num_warps=1, num_stages=1)
    return buf2, buf7


class PropMaxPoolNew(nn.Module):

    def __init__(self, cfg):
        super(PropMaxPoolNew, self).__init__()
        num_layers = cfg.NUM_LAYERS
        self.layers = nn.ModuleList([nn.Identity()] + [nn.MaxPool1d(2, stride=1) for _ in range(num_layers - 1)])
        self.num_layers = num_layers

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0], output[1]
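# --- Editor's note: hypothetical usage sketch, not part of the original record. ---
# It mirrors the get_inputs/get_init_inputs convention used by other records in
# this dataset. PropMaxPoolNew expects a config object exposing NUM_LAYERS; the
# SimpleNamespace below is an assumption chosen to match the shapes hard-coded in
# call() (a (4, 4, 4) input pooled into a 4x4 proposal map, i.e. NUM_LAYERS=4).
# Running the module requires a CUDA device, since call() allocates CUDA buffers.
from types import SimpleNamespace


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'cfg': SimpleNamespace(NUM_LAYERS=4)}]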
EGO4D/episodic-memory
PropMaxPool
false
8,812
[ "MIT" ]
27
2a3464882cd4f665c358c1b05a6397339e33c2e1
https://github.com/EGO4D/episodic-memory/tree/2a3464882cd4f665c358c1b05a6397339e33c2e1
VanillaGenerativeAdversarialLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/dj/cdjw23gjsvidmi7xlhfpigbxfqor2vsiltadbvmh3w5h4gtvhk46.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
#   binary_cross_entropy_with_logits => abs_1, exp, full_default, full_default_1, log1p, mean, minimum, mul, neg, sub_1, sub_2
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %arg0_1), kwargs = {})
#   %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %arg0_1), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = 0.0
    tmp2 = tmp1 * tmp0
    tmp3 = triton_helpers.minimum(tmp1, tmp0)
    tmp4 = tl_math.abs(tmp0)
    tmp5 = -tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = libdevice.log1p(tmp6)
    tmp8 = tmp3 - tmp7
    tmp9 = tmp2 - tmp8
    tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp13 = 256.0
    tmp14 = tmp12 / tmp13
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
        stream0 = get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
    return (buf1, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = 0.0
    tmp2 = tmp1 * tmp0
    tmp3 = triton_helpers.minimum(tmp1, tmp0)
    tmp4 = tl_math.abs(tmp0)
    tmp5 = -tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = libdevice.log1p(tmp6)
    tmp8 = tmp3 - tmp7
    tmp9 = tmp2 - tmp8
    tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp13 = 256.0
    tmp14 = tmp12 / tmp13
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class VanillaGenerativeAdversarialLossNew(nn.Module):
    """
    Loss for `Vanilla Generative Adversarial Network <https://arxiv.org/abs/1406.2661>`_

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of elements in the output,
            ``'sum'``: the output will be summed. Default: ``'mean'``

    Inputs:
        - prediction (tensor): unnormalized discriminator predictions
        - real (bool): if the ground truth label is for real images or fake images. Default: true

    .. warning::
        Do not use sigmoid as the last layer of Discriminator.
    """

    def __init__(self, reduction='mean'):
        super(VanillaGenerativeAdversarialLossNew, self).__init__()
        self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
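# --- Editor's note: hypothetical usage sketch, not part of the original record. ---
# It mirrors the get_inputs/get_init_inputs convention used by other records in
# this dataset; the (4, 4, 4, 4) shape matches the assert_size_stride guard in
# call(), and the compiled path assumes a CUDA device.
def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'reduction': 'mean'}]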
neka-nat/Transfer-Learning-Library
VanillaGenerativeAdversarialLoss
false
16,147
[ "MIT" ]
1,474
a3b27b0d7562fa90a02e914140b37ab438469e6c
https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c
h_sigmoid
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_9/inductor_cache/rm/crm72qx6rsghhfnyhuw5kfn2z6uwrgv7td4c2blpcrynbklz2exg.py
# Topologically Sorted Source Nodes: [add, hardtanh, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
# Source node to ATen node mapping:
#   add => add
#   hardtanh => clamp_max, clamp_min
#   mul => mul
#   truediv => div
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {})
#   %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 1), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6), kwargs = {})
triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = 1.0
    tmp8 = tmp6 * tmp7
    tmp9 = 0.16666666666666666
    tmp10 = tmp8 * tmp9
    tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, hardtanh, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = 1.0
    tmp8 = tmp6 * tmp7
    tmp9 = 0.16666666666666666
    tmp10 = tmp8 * tmp9
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class h_sigmoidNew(nn.Module):

    def __init__(self, inplace=True, h_max=1):
        super(h_sigmoidNew, self).__init__()
        self.relu = nn.ReLU6(inplace=inplace)
        self.h_max = h_max

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
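# --- Editor's note: hypothetical usage sketch, not part of the original record. ---
# It mirrors the get_inputs/get_init_inputs convention used by other records in
# this dataset; the init kwargs match h_sigmoidNew's defaults, and the input
# shape matches the assert_size_stride guard in call() (CUDA device assumed).
def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplace': True, 'h_max': 1}]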
rahulmangalampalli/esvit
h_sigmoid
false
12,917
[ "MIT" ]
0
5caf6e36b088ae2e7aaa4100b307eec991078e3e
https://github.com/rahulmangalampalli/esvit/tree/5caf6e36b088ae2e7aaa4100b307eec991078e3e
OneLayerFCBodyWithAction
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/kq/ckqwt2pwz66gtbhjk2az2uo7eeaumbjxytnisacsyuay6w5j2y3l.py
# Topologically Sorted Source Nodes: [cat, phi], Original ATen: [aten.cat, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   cat => cat
#   phi => relu
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_1, %view_3], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%cat,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_cat_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_cat_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 16) % 8
    x0 = xindex % 16
    x2 = (xindex // 128)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tmp11 = tl.full([1], 0, tl.int32)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp13 = 0.0
    tmp14 = tmp12 <= tmp13
    tl.store(out_ptr0 + (x3), tmp12, xmask)
    tl.store(out_ptr1 + (x3), tmp14, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [cat, phi], Original ATen: [aten.cat, aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_relu_threshold_backward_0.run(buf0, buf1, buf2, buf3, 512, grid=grid(512), stream=stream0)
        del buf0
        del buf1
    return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf3, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tmp11 = tl.full([1], 0, tl.int32)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp13 = 0.0
    tmp14 = tmp12 <= tmp13
    tl.store(out_ptr0 + x3, tmp12, xmask)
    tl.store(out_ptr1 + x3, tmp14, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_cat_relu_threshold_backward_0[grid(512)](buf0, buf1, buf2, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del buf1
    return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf3


def layer_init(layer, w_scale=1.0):
    nn.init.orthogonal_(layer.weight.data)
    layer.weight.data.mul_(w_scale)
    nn.init.constant_(layer.bias.data, 0)
    return layer


class OneLayerFCBodyWithActionNew(nn.Module):

    def __init__(self, state_dim, action_dim, hidden_units, gate=F.relu):
        super(OneLayerFCBodyWithActionNew, self).__init__()
        self.fc_s = layer_init(nn.Linear(state_dim, hidden_units))
        self.fc_a = layer_init(nn.Linear(action_dim, hidden_units))
        self.gate = gate
        self.feature_dim = hidden_units * 2

    def forward(self, input_0, input_1):
        primals_1 = self.fc_s.weight
        primals_2 = self.fc_s.bias
        primals_4 = self.fc_a.weight
        primals_5 = self.fc_a.bias
        primals_3 = input_0
        primals_6 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
        return output[0]
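# --- Editor's note: hypothetical usage sketch, not part of the original record. ---
# It mirrors the get_inputs/get_init_inputs convention used by other records in
# this dataset; dimensions follow the (4, 4) weight and (4, 4, 4, 4) input guards
# in call(), i.e. state_dim = action_dim = hidden_units = 4 (CUDA device assumed).
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'action_dim': 4, 'hidden_units': 4}]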
Sohojoe/UdacityDeepRL-Project2
OneLayerFCBodyWithAction
false
5,844
[ "MIT" ]
1
7137eea0b606ea32d00424d23130ff213f03ecf1
https://github.com/Sohojoe/UdacityDeepRL-Project2/tree/7137eea0b606ea32d00424d23130ff213f03ecf1
LabelSmoothingCrossEntropy
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_1/inductor_cache/cu/ccuko3mbxkqp7jt6hb7qbvtcoh24p6hyrhnytl6csszmm77k2twe.py
# Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   logprobs => amax, clone_1, sub
# Graph fragment:
#   %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone_1, [-1], True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 1024
    x1 = (xindex // 1024)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + ((16*x1) + (64*(x0 // 16)) + (x0 % 16)), None)
    tmp1 = tl.load(in_ptr0 + ((64*(x0 // 16)) + (x0 % 16)), None, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + (64*(x0 // 16)) + (x0 % 16)), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + (64*(x0 // 16)) + (x0 % 16)), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + (64*(x0 // 16)) + (x0 % 16)), None, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/ro/crontwu3nbwwbc6scrasdfjovpgqubfzbn3q5gjyfqs3i2zst534.py
# Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   logprobs => exp, log, sub_1, sum_1
# Graph fragment:
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
#   %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1024
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1024 + x0), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2048 + x0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3072 + x0), None, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + (x2), tmp13, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/rb/crbplhsup7djsfltzsb2n2jefhyri7njg6573ienhsa36pldqvpl.py
# Topologically Sorted Source Nodes: [mul, mean, smooth_loss, mul_1, add, loss, mean_1], Original ATen: [aten.mul, aten.mean, aten.neg, aten.add]
# Source node to ATen node mapping:
#   add => add
#   loss => mul_2
#   mean => mean
#   mean_1 => mean_1
#   mul => mul
#   mul_1 => mul_1
#   smooth_loss => neg_1
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 0.9), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_1, [-1]), kwargs = {})
#   %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, 0.1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {})
#   %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_2,), kwargs = {})
triton_per_fused_add_mean_mul_neg_2 = async_compile.triton('triton_per_fused_add_mean_mul_neg_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 1024],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_neg_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_neg_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 1024
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp10 = tl.load(in_ptr1 + (r0), None)
    tmp11
= tl.load(in_ptr1 + (1024 + r0), None) tmp13 = tl.load(in_ptr1 + (2048 + r0), None) tmp15 = tl.load(in_ptr1 + (3072 + r0), None) tmp1 = tl.full([RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (r0 + (1024*tmp4)), None) tmp7 = -tmp6 tmp8 = 0.9 tmp9 = tmp7 * tmp8 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = 4.0 tmp18 = tmp16 / tmp17 tmp19 = -tmp18 tmp20 = 0.1 tmp21 = tmp19 * tmp20 tmp22 = tmp9 + tmp21 tmp23 = 1.0 tmp24 = tmp22 * tmp23 tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = 1024.0 tmp29 = tmp27 / tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp29, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (64, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1024, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1024, 4), (1, 1024), torch.float32) # Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 4096, grid=grid(4096), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((1024, 4), (1, 1024), torch.float32) # Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf0, buf1, 4096, grid=grid(4096), stream=stream0) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [mul, mean, smooth_loss, mul_1, add, loss, mean_1], Original ATen: [aten.mul, aten.mean, aten.neg, aten.add] triton_per_fused_add_mean_mul_neg_2.run(buf3, arg1_1, buf1, 1, 1024, grid=grid(1), stream=stream0) del arg1_1 del buf1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((64, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch._C import torch.serialization from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 1024 x1 = xindex // 1024 x2 = xindex tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (x0 // 16) + x0 % 16), None) tmp1 = tl.load(in_ptr0 + (64 * (x0 // 16) + x0 % 16), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + 64 * (x0 // 16) + x0 % 16), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + 64 * (x0 // 16) + x0 % 16), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + 64 * (x0 // 16) + x0 % 16), None, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1024 + x0), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2048 + x0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3072 + x0), None, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, None) @triton.jit def triton_per_fused_add_mean_mul_neg_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp10 = tl.load(in_ptr1 + r0, None) tmp11 = tl.load(in_ptr1 + (1024 + r0), None) tmp13 = tl.load(in_ptr1 + (2048 + r0), None) tmp15 = tl.load(in_ptr1 + (3072 + r0), None) tmp1 = tl.full([RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (r0 + 1024 * tmp4), None) tmp7 = -tmp6 tmp8 = 0.9 tmp9 = tmp7 * tmp8 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = 4.0 tmp18 = tmp16 / tmp17 tmp19 = -tmp18 tmp20 = 0.1 tmp21 = tmp19 * tmp20 tmp22 = tmp9 + tmp21 tmp23 = 1.0 tmp24 = tmp22 * tmp23 tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = 1024.0 tmp29 = tmp27 / tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (64, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(arg1_1, (1024,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1024, 4), (1, 1024), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(4096)](arg0_1, buf0, 4096, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((1024, 4), (1, 1024), torch.float32) triton_poi_fused__log_softmax_1[grid(4096)](buf0, buf1, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_mean_mul_neg_2[grid(1)](buf3, arg1_1, buf1, 1, 1024, num_warps=8, num_stages=1) del arg1_1 del buf1 return buf3, class LabelSmoothingCrossEntropyNew(nn.Module): """ NLL loss with label smoothing. """ def __init__(self, smoothing=0.1, loss_weight=1.0, loss_name='loss_ce'): super(LabelSmoothingCrossEntropyNew, self).__init__() assert smoothing < 1.0 self.smoothing = smoothing self.confidence = 1.0 - smoothing self.loss_weight = loss_weight self._loss_name = loss_name @property def loss_name(self): """Loss Name. This function must be implemented and will return the name of this loss function. This name will be used to combine different loss items by simple sum operation. In addition, if you want this loss item to be included into the backward graph, `loss_` must be the prefix of the name. Returns: str: The name of this loss item. """ return self._loss_name def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Molly6/segmentation_shengteng2021
LabelSmoothingCrossEntropy
false
8,569
[ "Apache-2.0" ]
21
33dfefa80193586f504069793d9e141944549e99
https://github.com/Molly6/segmentation_shengteng2021/tree/33dfefa80193586f504069793d9e141944549e99
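The constants baked into triton_per_fused_add_mean_mul_neg_2 above (0.9 and 0.1) are the confidence and smoothing terms of the loss, and the index arithmetic in triton_poi_fused__log_softmax_0 moves the class dimension of the (64, 4, 4, 4) prediction to the last axis before the log-softmax. A hedged eager sketch of the same computation, useful for sanity-checking the fused path; the function name and the permute layout are inferred from the kernel indexing, not taken from the source repo:

import torch
import torch.nn.functional as F


def label_smoothing_ce_reference(pred, target, smoothing=0.1, loss_weight=1.0):
    # pred: (N, C, H, W) class scores; target: flat int64 labels of length N*H*W.
    # Classes move to the last dim, then: confidence * NLL + smoothing * (-mean logprob).
    c = pred.shape[1]
    logprobs = F.log_softmax(pred.permute(0, 2, 3, 1).reshape(-1, c), dim=-1)
    nll = -logprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth = -logprobs.mean(dim=-1)
    return loss_weight * ((1.0 - smoothing) * nll + smoothing * smooth).mean()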
MaskedL1Loss
import torch import torch.utils.data import torch.nn as nn class MaskedL1Loss(nn.Module): def __init__(self): super(MaskedL1Loss, self).__init__() self.criterion = nn.L1Loss() def forward(self, input, target, mask): mask = mask.expand(-1, input.size()[1], -1, -1) loss = self.criterion(input * mask, target * mask) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp3 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg1_1, arg0_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, class MaskedL1LossNew(nn.Module): def __init__(self): super(MaskedL1LossNew, self).__init__() self.criterion = nn.L1Loss() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
lichnost/head2head
MaskedL1Loss
false
3,914
[ "MIT" ]
0
b0ec8b6965c9a32f3727dee9c164a7aaff027c5f
https://github.com/lichnost/head2head/tree/b0ec8b6965c9a32f3727dee9c164a7aaff027c5f
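A minimal smoke test for this record, as a sketch with stated assumptions: it presumes a CUDA device (call() pins cuda:0) and the exact 4x4x4x4 shapes that assert_size_stride enforces, with MaskedL1Loss and MaskedL1LossNew from the two code blocks above in scope:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    mask = (torch.rand(4, 4, 4, 4, device='cuda') > 0.5).float()
    eager = MaskedL1Loss()(x, y, mask)    # nn.L1Loss reference path
    fused = MaskedL1LossNew()(x, y, mask) # Triton path
    print(torch.allclose(eager, fused, atol=1e-06))  # True if the fused path matches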
Attention
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *


class Attention(nn.Module):

    def __init__(self, opt):
        super(Attention, self).__init__()
        self.rnn_size = opt.rnn_size
        self.att_hid_size = opt.att_hid_size
        self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
        self.alpha_net = nn.Linear(self.att_hid_size, 1)

    def forward(self, h, att_feats, p_att_feats, att_masks=None):
        att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
        att = p_att_feats.view(-1, att_size, self.att_hid_size)
        att_h = self.h2att(h)
        att_h = att_h.unsqueeze(1).expand_as(att)
        dot = att + att_h
        dot = torch.tanh(dot)  # F.tanh is deprecated; torch.tanh is equivalent
        dot = dot.view(-1, self.att_hid_size)
        dot = self.alpha_net(dot)
        dot = dot.view(-1, att_size)
        weight = F.softmax(dot, dim=1)
        if att_masks is not None:
            weight = weight * att_masks.view(-1, att_size).float()
            weight = weight / weight.sum(1, keepdim=True)
        att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1))
        att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1)
        return att_res


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4,
        4, 4])]


def get_init_inputs():
    return [[], {'opt': _mock_config(rnn_size=4, att_hid_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_5, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(256)](primals_2, buf0, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_4 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_per_fused__softmax_1[grid(4)](buf3, buf4, buf5, buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf7 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0) del buf0 
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 16), (16, 0, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 4, 1), 0), out=buf7) del buf6 return reinterpret_tensor(buf7, (4, 4), (4, 1), 0 ), primals_5, buf1, buf3, buf4, buf5, reinterpret_tensor(primals_1, (4, 4, 16), (64, 1, 4), 0), primals_6 class AttentionNew(nn.Module): def __init__(self, opt): super(AttentionNew, self).__init__() self.rnn_size = opt.rnn_size self.att_hid_size = opt.att_hid_size self.h2att = nn.Linear(self.rnn_size, self.att_hid_size) self.alpha_net = nn.Linear(self.att_hid_size, 1) def forward(self, input_0, input_1, input_2): primals_3 = self.h2att.weight primals_4 = self.h2att.bias primals_6 = self.alpha_net.weight primals_7 = self.alpha_net.bias primals_5 = input_0 primals_1 = input_1 primals_2 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Zhendong-Wang/arsm_image_captioning
Attention
false
11,137
[ "MIT" ]
0
2282b76ab03b53952269d94d6c4b19ab98636ca5
https://github.com/Zhendong-Wang/arsm_image_captioning/tree/2282b76ab03b53952269d94d6c4b19ab98636ca5
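A usage sketch mirroring this record's get_inputs/get_init_inputs (assumptions: a CUDA device for the compiled path, and the same _mock_config helper the record itself imports):

import torch
from _paritybench_helpers import _mock_config

opt = _mock_config(rnn_size=4, att_hid_size=4)
attn = AttentionNew(opt).cuda()
h = torch.rand(4, 4, device='cuda')                  # decoder hidden state
att_feats = torch.rand(4, 4, 4, 4, device='cuda')    # attention features
p_att_feats = torch.rand(4, 4, 4, 4, device='cuda')  # pre-projected features
att_res = attn(h, att_feats, p_att_feats)
print(att_res.shape)  # torch.Size([4, 4]): one attended feature vector per batch row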
SimpleMLPGen_with_meta_feature
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/mi/cmi476zw6ohnah2zxegdqc6fvw4ui6ahcdpt576v245i6ehle2hj.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu] # Source node to ATen node mapping: # x => expm1, gt, mul, mul_2, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_elu_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 1), (1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3) del primals_5 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 1), (1, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.optim import torch.jit import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 1), ( 1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 1), (1, 1), 0), primals_4 class SimpleMLPGen_with_meta_featureNew(nn.Module): def __init__(self, num_in_features, num_out_features, neurons_per_layer): super(SimpleMLPGen_with_meta_featureNew, self).__init__() self.l_in = nn.Linear(in_features=num_in_features, out_features= neurons_per_layer) self.l_out = nn.Linear(in_features=neurons_per_layer, out_features= num_out_features) self.act = nn.ELU() def set_parameters(self, meta_in_features, simple_mlp_gen_obj): x = simple_mlp_gen_obj.act(simple_mlp_gen_obj.l_in(meta_in_features)) x = simple_mlp_gen_obj.l_out(x) _base = (simple_mlp_gen_obj.num_in_features * simple_mlp_gen_obj. neurons_per_layer) l_in_weight = x[:_base].reshape((simple_mlp_gen_obj.num_in_features, simple_mlp_gen_obj.neurons_per_layer)).t() l_in_bias = x[_base:_base + simple_mlp_gen_obj.neurons_per_layer] _base += simple_mlp_gen_obj.neurons_per_layer _base_add = (simple_mlp_gen_obj.neurons_per_layer * simple_mlp_gen_obj.num_out_features) l_out_weight = x[_base:_base + _base_add].reshape(( simple_mlp_gen_obj.neurons_per_layer, simple_mlp_gen_obj. 
num_out_features)).t() _base += _base_add l_out_bias = x[_base:] self.l_in.weight = torch.nn.Parameter(l_in_weight) self.l_out.weight = torch.nn.Parameter(l_out_weight) self.l_in.bias = torch.nn.Parameter(l_in_bias) self.l_out.bias = torch.nn.Parameter(l_out_bias) def forward(self, input_0): primals_1 = self.l_in.weight primals_2 = self.l_in.bias primals_4 = self.l_out.weight primals_5 = self.l_out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
zhaofeng-shu33/deep_euler_tests
SimpleMLPGen_with_meta_feature
false
13,166
[ "MIT" ]
0
a3d0961af679d490b0c58873ee0726234122bc7a
https://github.com/zhaofeng-shu33/deep_euler_tests/tree/a3d0961af679d490b0c58873ee0726234122bc7a
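The shapes asserted in call() pin down the constructor arguments: l_in.weight is (1, 4) and l_out.weight is (4, 1), i.e. num_in_features=4, neurons_per_layer=1, num_out_features=4. A hedged instantiation sketch under those assumptions (CUDA assumed, since call() pins device 0):

import torch

net = SimpleMLPGen_with_meta_featureNew(num_in_features=4,
    num_out_features=4, neurons_per_layer=1).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = net(x)  # Linear(4 -> 1), ELU, Linear(1 -> 4) applied along the last dim
print(out.shape)  # torch.Size([4, 4, 4, 4])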
MLP
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from collections import OrderedDict
from tqdm import tqdm


class MLP(nn.Module):

    def __init__(self, input_size, output_size):
        super(MLP, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.mlp = nn.Sequential(OrderedDict([('linear1', nn.Linear(self.
            input_size, 256)), ('relu1', nn.ReLU()), ('linear2', nn.Linear(
            256, 128)), ('relu2', nn.ReLU()), ('linear3', nn.Linear(128,
            self.output_size))]))

    def forward(self, text):
        out = self.mlp(text)
        return out

    def construct_sparse_tensor(self, coo):
        """Convert a scipy.sparse.coo_matrix to a dense torch.Tensor."""
        values = coo.data
        indices = np.vstack((coo.row, coo.col))
        i = torch.LongTensor(indices)
        v = torch.FloatTensor(values)
        shape = coo.shape
        return torch.sparse_coo_tensor(i, v, torch.Size(shape)).to_dense()

    def train_mlp(self, x_train: 'torch.Tensor', x_val: 'torch.Tensor',
        x_test: 'torch.Tensor', y_train: 'torch.Tensor', y_val:
        'torch.Tensor', y_test: 'torch.Tensor', hparams: 'dict'):
        lr = hparams['lr']
        epochs = hparams['epochs']
        batch_size = hparams['batch_size']
        patience = hparams['patience']
        optimizer = optim.Adam(self.parameters(), lr=lr)
        criterion = nn.CrossEntropyLoss()
        best_acc = -np.inf
        best_epoch = 0
        trace_train = []
        trace_val = []
        for epoch in range(epochs):
            running_train_loss = 0.0
            running_train_acc = 0.0
            for i in tqdm(range(0, x_train.shape[0], batch_size)):
                batch = x_train[i:i + batch_size]
                label = y_train[i:i + batch_size]
                output = self.forward(batch)
                loss = criterion(output, label)
                predictions = output.argmax(axis=1)
                running_train_acc += (predictions == label).sum()
                optimizer.zero_grad()
                loss.backward()
                running_train_loss += loss.item()
                optimizer.step()
            running_val_loss = 0.0
            running_val_acc = 0.0
            for i in tqdm(range(0, x_val.shape[0], batch_size)):
                batch = x_val[i:i + batch_size]
                label = y_val[i:i + batch_size]
                output = self.forward(batch)
                predictions = output.argmax(axis=1)
                running_val_acc += (predictions == label).sum()
                loss = criterion(output, label)
                running_val_loss += loss.item()
            trace_train.append(running_train_loss)
            trace_val.append(running_val_loss)
            # Keep the best checkpoint by validation accuracy; stop after
            # `patience` epochs without improvement.
            if running_val_acc > best_acc:
                best_acc = running_val_acc
                best_epoch = epoch
                best_state = {key: value.cpu() for key, value in self.
                    state_dict().items()}
            elif epoch >= best_epoch + patience:
                break
        self.load_state_dict(best_state)
        torch.save(best_state, 'model.pt')
        # Test accuracy of the restored best model.
        predictions = self.forward(x_test).argmax(axis=1)
        (predictions == y_test).sum()
        return trace_train, trace_val


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn import torch.optim as optim from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 256), (256, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (4, 128), (128, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3, primals_5, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), 
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
        ), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
        ), primals_6, buf5, primals_4, buf6


class MLPNew(nn.Module):

    def __init__(self, input_size, output_size):
        super(MLPNew, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.mlp = nn.Sequential(OrderedDict([('linear1', nn.Linear(self.
            input_size, 256)), ('relu1', nn.ReLU()), ('linear2', nn.Linear(
            256, 128)), ('relu2', nn.ReLU()), ('linear3', nn.Linear(128,
            self.output_size))]))

    def construct_sparse_tensor(self, coo):
        """Convert a scipy.sparse.coo_matrix to a dense torch.Tensor."""
        values = coo.data
        indices = np.vstack((coo.row, coo.col))
        i = torch.LongTensor(indices)
        v = torch.FloatTensor(values)
        shape = coo.shape
        return torch.sparse_coo_tensor(i, v, torch.Size(shape)).to_dense()

    def train_mlp(self, x_train: 'torch.Tensor', x_val: 'torch.Tensor',
        x_test: 'torch.Tensor', y_train: 'torch.Tensor', y_val:
        'torch.Tensor', y_test: 'torch.Tensor', hparams: 'dict'):
        from tqdm import tqdm  # used below; not imported at module level here
        lr = hparams['lr']
        epochs = hparams['epochs']
        batch_size = hparams['batch_size']
        patience = hparams['patience']
        optimizer = optim.Adam(self.parameters(), lr=lr)
        criterion = nn.CrossEntropyLoss()
        best_acc = -np.inf
        best_epoch = 0
        trace_train = []
        trace_val = []
        for epoch in range(epochs):
            running_train_loss = 0.0
            running_train_acc = 0.0
            for i in tqdm(range(0, x_train.shape[0], batch_size)):
                batch = x_train[i:i + batch_size]
                label = y_train[i:i + batch_size]
                output = self.forward(batch)
                loss = criterion(output, label)
                predictions = output.argmax(axis=1)
                running_train_acc += (predictions == label).sum()
                optimizer.zero_grad()
                loss.backward()
                running_train_loss += loss.item()
                optimizer.step()
            running_val_loss = 0.0
            running_val_acc = 0.0
            for i in tqdm(range(0, x_val.shape[0], batch_size)):
                batch = x_val[i:i + batch_size]
                label = y_val[i:i + batch_size]
                output = self.forward(batch)
                predictions = output.argmax(axis=1)
                running_val_acc += (predictions == label).sum()
                loss = criterion(output, label)
                running_val_loss += loss.item()
            trace_train.append(running_train_loss)
            trace_val.append(running_val_loss)
            # Keep the best checkpoint by validation accuracy; stop after
            # `patience` epochs without improvement.
            if running_val_acc > best_acc:
                best_acc = running_val_acc
                best_epoch = epoch
                best_state = {key: value.cpu() for key, value in self.
                    state_dict().items()}
            elif epoch >= best_epoch + patience:
                break
        self.load_state_dict(best_state)
        torch.save(best_state, 'model.pt')
        # Test accuracy of the restored best model.
        predictions = self.forward(x_test).argmax(axis=1)
        (predictions == y_test).sum()
        return trace_train, trace_val

    def forward(self, input_0):
        primals_1 = self.mlp.linear1.weight
        primals_2 = self.mlp.linear1.bias
        primals_4 = self.mlp.linear2.weight
        primals_5 = self.mlp.linear2.bias
        primals_6 = self.mlp.linear3.weight
        primals_7 = self.mlp.linear3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
andreasbinder/Stochastic-Graph-assisted-Genre-Classification
MLP
false
1,435
[ "MIT" ]
0
78752716030466f02424dcf1cbe5a66d756a13c4
https://github.com/andreasbinder/Stochastic-Graph-assisted-Genre-Classification/tree/78752716030466f02424dcf1cbe5a66d756a13c4
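A forward-pass sketch for this record (assumptions: a CUDA device, since call() pins device 0, and placeholder hyperparameter values; the hparams keys are the ones train_mlp actually reads):

import torch

mlp = MLPNew(input_size=4, output_size=4).cuda()
logits = mlp(torch.rand(4, 4, 4, 4, device='cuda'))
print(logits.shape)  # torch.Size([4, 4, 4, 4]); the 4 class scores sit on the last dim
hparams = {'lr': 0.001, 'epochs': 10, 'batch_size': 32, 'patience': 3}  # for train_mlp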
MyBatchNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/zm/czmag3kfjhprfl74ex2gtrbasibb5zez6geq7q2q34kdf6ae4lps.py # Topologically Sorted Source Nodes: [var_mean, add, sqrt], Original ATen: [aten.var_mean, aten.add, aten.sqrt] # Source node to ATen node mapping: # add => add # sqrt => sqrt # var_mean => var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [0]), kwargs = {correction: 1}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_add_sqrt_var_mean_0 = async_compile.triton('triton_poi_fused_add_sqrt_var_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sqrt_var_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_sqrt_var_mean_0(in_ptr0, out_ptr0, out_ptr1, xnumel, 
XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ub/cubnh6zgxyxindhc5zulhxvecgwfwmjretrkc27g3y2xg56gt47o.py # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, zed_prime, mul, zed_norm], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # sqrt => sqrt # sub => sub # var_mean => var_mean # zed_norm => add_1 # zed_prime => div # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [0]), kwargs = {correction: 1}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mul_sqrt_sub_var_mean_1 = async_compile.triton('triton_poi_fused_add_div_mul_sqrt_sub_var_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sqrt_sub_var_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_var_mean_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = xindex x4 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = tmp3 / tmp4 tmp6 = tmp0 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [var_mean, add, sqrt], Original ATen: [aten.var_mean, aten.add, aten.sqrt] stream0 = get_raw_stream(0) triton_poi_fused_add_sqrt_var_mean_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, zed_prime, mul, zed_norm], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mul_sqrt_sub_var_mean_1.run(primals_2, primals_1, buf0, buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_2 del primals_3 return (buf2, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_sqrt_var_mean_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp24, xmask) @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_var_mean_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = xindex x4 = xindex % 64 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = tmp3 / tmp4 tmp6 = tmp0 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_sqrt_var_mean_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_sqrt_sub_var_mean_1[grid(256)](primals_2, primals_1, buf0, buf1, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_3 return buf2, primals_1 class MyBatchNormNew(nn.Module): def __init__(self, size, epsilon=1e-05): super(MyBatchNormNew, self).__init__() self.gamma = nn.Parameter(torch.ones(size)) self.beta = nn.Parameter(torch.zeros(size)) self.epsilon = epsilon def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
shohamda/deep-learning
MyBatchNorm
false
4,319
[ "MIT" ]
0
160296c403cefd5351ffe5161e07789c22637284
https://github.com/shohamda/deep-learning/tree/160296c403cefd5351ffe5161e07789c22637284
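The graph fragments above name the intermediates zed_prime and zed_norm, and the variance kernel divides the squared deviations by 3.0, i.e. the unbiased (N-1) estimate for a batch of 4 (var_mean with correction=1). A hedged eager sketch of the same computation; the function name is an invention for illustration:

import torch


def my_batch_norm_reference(x, gamma, beta, epsilon=1e-05):
    # Batch statistics over dim 0 with unbiased variance, matching the kernels.
    mean = x.mean(dim=0, keepdim=True)
    var = x.var(dim=0, unbiased=True, keepdim=True)
    zed_prime = (x - mean) / torch.sqrt(var + epsilon)
    return gamma * zed_prime + beta  # gamma and beta broadcast over the last dim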
MySimpleNet
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall

import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/nu/cnuuaznpt4szfn74bn46qfjkdypvlkfa5x44ywjpperdjt2a66rj.py
# Topologically Sorted Source Nodes: [X], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   X => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 640
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 10
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [X_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   X_2 => relu_1
# Graph fragment:
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/z5/cz5xs7y3thsep5yn6qoths757rduuevog6mtea3nqr4nwnh2olnx.py
# Topologically Sorted Source Nodes: [X_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   X_3 => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 8
    x2 = (xindex // 32)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/nv/cnvo7i3x3dm4mdtrcmoddo2p4odl6hgahimnieftjxkqwe7ehw54.py
# Topologically Sorted Source Nodes: [X_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   X_3 => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 8
    x2 = (xindex // 32)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (10, 4), (4, 1))
    assert_size_stride(primals_2, (10, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 10), (10, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (2, 4), (4, 1))
    assert_size_stride(primals_7, (2, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0); del buf0  # reuse
        buf8 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool)
        # Topologically Sorted Source Nodes: [X], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 640, grid=grid(640), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 4), (1, 10), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2  # reuse
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [X_2], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [X_3], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_2.run(buf4, buf5, 128, grid=grid(128), stream=stream0)
        buf6 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf4  # reuse
        # Topologically Sorted Source Nodes: [X_3], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_3.run(buf5, buf6, 128, grid=grid(128), stream=stream0)
        del buf5
    return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((10, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 10), (10, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 640
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 10
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 8
    x2 = xindex // 32
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 8
    x2 = xindex // 32
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (10, 4), (4, 1))
    assert_size_stride(primals_2, (10,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 10), (10, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (2, 4), (4, 1))
    assert_size_stride(primals_7, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0)
        del buf0
        buf8 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(640)](buf1,
            primals_2, buf8, 640, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0),
            reinterpret_tensor(primals_4, (10, 4), (1, 10), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3,
            primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 2), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(128)](buf4, buf5, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf6 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0)
        del buf4
        triton_poi_fused__softmax_3[grid(128)](buf5, buf6, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf5
    return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(
        buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8


class MySimpleNetNew(nn.Module):
    """
    Very simple 2-layer net, slightly adapted from the docs:
    https://skorch.readthedocs.io/en/stable/user/quickstart.html
    """

    def __init__(self, num_in, num_feat, num_hidden=10, nonlin=F.relu):
        super(MySimpleNetNew, self).__init__()
        self.dense0 = nn.Linear(num_in, num_hidden)
        self.nonlin = nonlin
        self.dropout = nn.Dropout(0.5)
        self.dense1 = nn.Linear(num_hidden, num_feat)
        self.output = nn.Linear(num_feat, 2)

    def forward(self, input_0):
        primals_1 = self.dense0.weight
        primals_2 = self.dense0.bias
        primals_4 = self.dense1.weight
        primals_5 = self.dense1.bias
        primals_6 = self.output.weight
        primals_7 = self.output.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
samxu0823/anfis-pytorch
MySimpleNet
false
4,259
[ "MIT" ]
0
b4ec3f0e8259963800e9e0a2904a580d1e56cc1c
https://github.com/samxu0823/anfis-pytorch/tree/b4ec3f0e8259963800e9e0a2904a580d1e56cc1c
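A quick way to sanity-check a record like this is to drive the compiled class directly. The sketch below is not part of the record: the constructor values num_in=4 and num_feat=4 are assumptions read off the (10, 4) and (4, 10) weight shapes asserted in call(), and a CUDA device is required by the generated kernels.

# Hypothetical smoke test for MySimpleNetNew (assumes a CUDA device).
# num_in=4 / num_feat=4 are inferred from the weight-shape asserts in call().
import torch

model = MySimpleNetNew(num_in=4, num_feat=4).cuda()
out = model(torch.rand(4, 4, 4, 4, device='cuda'))
print(out.shape)         # torch.Size([4, 4, 4, 2])
print(out.sum(dim=1))    # all ones: the fused softmax reduces over dim 1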
GlobalAttention
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall

import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   align_vectors => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   align_vectors => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/ip/cip3p4ibqio6uu76ccsemd7wjusq5ptlow3dt2zxzouyuz2sqywf.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %primals_1], 2), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = (xindex // 8)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/f5/cf5pnuv5il7avsmzck3quom7r6zvcfuulsdwpzlv2epzfmcgqgwb.py
# Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   attn_h_2 => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x2 = (xindex // 16)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
    tmp1 = libdevice.tanh(tmp0)
    tl.store(out_ptr0 + (x3), tmp1, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/u4/cu4fypgfipklcxtitafatnyqdaatx5tws6qfndqotcy4qivcph6d.py
# Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   align_vectors_2 => clone_1
# Graph fragment:
#   %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x2 = (xindex // 16)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
    tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [align], Original ATen: [aten.bmm]
        extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
        stream0 = get_raw_stream(0)
        triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
        buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
        buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1  # reuse
        # Topologically Sorted Source Nodes: [c], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3)
        del primals_2
        buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
        triton_poi_fused_cat_2.run(buf3, primals_1, buf4, 128, grid=grid(128), stream=stream0)
        del primals_1
        buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0); del buf3  # reuse
        # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
        del primals_3
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone]
        triton_poi_fused_clone_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
        buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone]
        triton_poi_fused_clone_4.run(buf2, buf7, 64, grid=grid(64), stream=stream0)
        del buf2
    return (buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.cuda
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
    tmp1 = libdevice.tanh(tmp0)
    tl.store(out_ptr0 + x3, tmp1, xmask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
    tl.store(out_ptr0 + x3, tmp0, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4,
            4), (16, 1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
        del buf0
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
        del buf1
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
            0), primals_2, out=buf3)
        del primals_2
        buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
        del buf3
        extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0),
            reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
        del primals_3
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf2
    return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5


def aeq(*args):
    """
    Assert all arguments have the same value
    """
    arguments = (arg for arg in args)
    first = next(arguments)
    assert all(arg == first for arg in arguments
        ), 'Not all arguments have the same value: ' + str(args)


class Bottle(nn.Module):

    def forward(self, input):
        if len(input.size()) <= 2:
            return super(Bottle, self).forward(input)
        size = input.size()[:2]
        out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
        return out.contiguous().view(size[0], size[1], -1)


class BottleLinear(Bottle, nn.Linear):
    pass


class GlobalAttentionNew(nn.Module):
    """
    Luong Attention.

    Global attention takes a matrix and a query vector. It
    then computes a parameterized convex combination of the matrix
    based on the input query.

        H_1 H_2 H_3 ... H_n
          q   q   q       q
            |  |   |       |
              \\ |   |      /
                      .....
                  \\   |  /
                      a

    Constructs a unit mapping.
    $$(H_1 + H_n, q) => (a)$$
    Where H is of `batch x n x dim` and q is of `batch x dim`.

    Luong Attention (dot, general):
    The full function is
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.

    * dot: $$score(h_t, {\\overline{h}}_s) = h_t^T {\\overline{h}}_s$$
    * general: $$score(h_t, {\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$

    Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength} a_j h_j$$.
    The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j))$$.
    """

    def __init__(self, dim, coverage=False, attn_type='dot'):
        super(GlobalAttentionNew, self).__init__()
        self.dim = dim
        self.attn_type = attn_type
        assert self.attn_type in ['dot', 'general', 'mlp'
            ], 'Please select a valid attention type.'
        if self.attn_type == 'general':
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == 'mlp':
            self.linear_context = BottleLinear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = BottleLinear(dim, 1, bias=False)
        out_bias = self.attn_type == 'mlp'
        self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        self.sm = nn.Softmax()
        self.tanh = nn.Tanh()
        self.mask = None
        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)

    def applyMask(self, mask):
        self.mask = mask

    def score(self, h_t, h_s):
        """
        h_t (FloatTensor): batch x tgt_len x dim
        h_s (FloatTensor): batch x src_len x dim
        returns scores (FloatTensor): batch x tgt_len x src_len:
            raw attention scores for each src index
        """
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)
        if self.attn_type in ['general', 'dot']:
            if self.attn_type == 'general':
                h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            h_s_ = h_s.transpose(1, 2)
            return torch.bmm(h_t, h_s_)
        else:
            dim = self.dim
            wq = self.linear_query(h_t.view(-1, dim))
            wq = wq.view(tgt_batch, tgt_len, 1, dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
            uh = self.linear_context(h_s.contiguous().view(-1, dim))
            uh = uh.view(src_batch, 1, src_len, dim)
            uh = uh.expand(src_batch, tgt_len, src_len, dim)
            wquh = self.tanh(wq + uh)
            return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)

    def forward(self, input_0, input_1):
        primals_3 = self.linear_out.weight
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
Flamexmt/LMA
GlobalAttention
false
13,715
[ "MIT" ]
321
f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
https://github.com/Flamexmt/LMA/tree/f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
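The compiled forward only covers the 'dot' scoring path with the (4, 4, 4) shapes asserted in call(). A minimal driving sketch follows; the dim=4 value and all tensor shapes are assumptions read off those asserts, and a CUDA device is required.

# Hypothetical usage of GlobalAttentionNew (assumes a CUDA device).
# dim=4 and the (batch, len, dim) = (4, 4, 4) shapes mirror the asserts in call().
import torch

attn = GlobalAttentionNew(dim=4, attn_type='dot').cuda()
query = torch.rand(4, 4, 4, device='cuda')   # batch x tgt_len x dim
memory = torch.rand(4, 4, 4, device='cuda')  # batch x src_len x dim
attn_h, align_vectors = attn(query, memory)
print(attn_h.shape, align_vectors.shape)     # both torch.Size([4, 4, 4])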
PytorchBinary
import torch
import torch.nn as nn
import torch.nn.functional as F


class PytorchBinary(nn.Module):

    def __init__(self, num_features):
        super(PytorchBinary, self).__init__()
        self.layer_1 = nn.Linear(num_features, 256)
        self.layer_out = nn.Linear(256, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = F.dropout(F.relu(self.layer_1(x)))
        x = self.sigmoid(self.layer_out(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (256, 4), (4, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 256), (256, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
            )
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
            primals_2, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True)
        del buf1
        buf3 = buf2[0]
        buf4 = buf2[1]
        del buf2
        buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
            reinterpret_tensor(primals_4, (256, 1), (1, 256), 0), out=buf5)
        buf6 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf5
        triton_poi_fused_sigmoid_1[grid(64)](buf6, primals_5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_5
    return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf4, reinterpret_tensor(buf3, (64, 256), (256, 1), 0
        ), buf6, primals_4, buf7


class PytorchBinaryNew(nn.Module):

    def __init__(self, num_features):
        super(PytorchBinaryNew, self).__init__()
        self.layer_1 = nn.Linear(num_features, 256)
        self.layer_out = nn.Linear(256, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_0):
        primals_1 = self.layer_1.weight
        primals_2 = self.layer_1.bias
        primals_4 = self.layer_out.weight
        primals_5 = self.layer_out.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
freescania/advdsi_at2
PytorchBinary
false
10,150
[ "MIT" ]
0
13fa0b8beaeccc28975aea40ee5a1db3dd3e33be
https://github.com/freescania/advdsi_at2/tree/13fa0b8beaeccc28975aea40ee5a1db3dd3e33be
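One thing the compiled graph makes explicit is that dropout is baked in with training semantics: call() invokes torch.ops.aten.native_dropout.default(buf1, 0.5, True), so two calls on the same input give different outputs. A minimal sketch (not part of the record; the input shape and num_features=4 come from get_inputs()/get_init_inputs() in the original source, and a CUDA device is assumed):

# Hypothetical smoke test for PytorchBinaryNew (assumes a CUDA device).
import torch

model = PytorchBinaryNew(num_features=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
p1, p2 = model(x), model(x)
print(p1.shape)                # torch.Size([4, 4, 4, 1]), sigmoid outputs in (0, 1)
print(torch.allclose(p1, p2))  # False: dropout is always active in this graph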
LearnedUpsampling1d
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall

import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/yq/cyq4wjugvjvikmffahqz4pku6bhacbiyag4qtgzuj5w5mlbrlq42.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   add => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %view), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + ((4*x1) + (x0 % 4)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv_transpose1d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(4,), padding=(0,), dilation=(1,), transposed=True, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 16), (64, 16, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
        del primals_2
    return (buf1, primals_1, primals_3, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (4 * x1 + x0 % 4), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(4,),
            padding=(0,), dilation=(1,), transposed=True, output_padding=(0
            ,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 16), (64, 16, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3


class LearnedUpsampling1dNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, bias=True):
        super().__init__()
        self.conv_t = nn.ConvTranspose1d(in_channels=in_channels,
            out_channels=out_channels, kernel_size=kernel_size, stride=
            kernel_size, bias=False)
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_channels,
                kernel_size))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        self.conv_t.reset_parameters()
        # Guard for bias=False and use the in-place initializer; the bare
        # nn.init.constant alias is long deprecated and crashes on None.
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

    def forward(self, input_0):
        primals_2 = self.bias
        primals_1 = self.conv_t.weight
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
fdb/samplernn-pytorch
LearnedUpsampling1d
false
15,345
[ "MIT" ]
259
87ce71cc2cf26601a271648597f198df33059f96
https://github.com/fdb/samplernn-pytorch/tree/87ce71cc2cf26601a271648597f198df33059f96
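Since the transposed convolution uses stride equal to kernel_size, each input step expands into kernel_size output steps, and the learned (out_channels x kernel_size) bias is tiled across them by the fused add kernel. A minimal sketch, with shapes mirroring the asserts in call() (assumes a CUDA device; these values are illustrative, not part of the record):

# Hypothetical usage of LearnedUpsampling1dNew (assumes a CUDA device).
import torch

up = LearnedUpsampling1dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
y = up(torch.rand(4, 4, 4, device='cuda'))
print(y.shape)  # torch.Size([4, 4, 16]): sequence length grows by kernel_size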
PixelNorm
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall

import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/ln/clnknwdlswmj3l56wk4ertm47scosxi4dtpndhkl7cvrs2tmbcj5.py
# Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, mul], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
# Source node to ATen node mapping:
#   add => add
#   mean => mean
#   mul => mul
#   pow_1 => pow_1
#   rsqrt => rsqrt
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %rsqrt), kwargs = {})
triton_poi_fused_add_mean_mul_pow_rsqrt_0 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_rsqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_rsqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)
    tmp17 = tmp0 * tmp16
    tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, mul], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mean_mul_pow_rsqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)
    tmp17 = tmp0 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mean_mul_pow_rsqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class PixelNormNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
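Editor's note: a minimal usage sketch for the compiled module above (not part of the original record). It assumes a CUDA device, since the wrapper pins everything to cuda:0, and the fixed 4x4x4x4 input shape that call() asserts:

if __name__ == '__main__':
    model = PixelNormNew()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = model(x)
    # Eager reference for the fused computation: x * rsqrt(mean(x**2, dim=1) + 1e-08).
    ref = x * torch.rsqrt(x.pow(2).mean(dim=1, keepdim=True) + 1e-08)
    print(torch.allclose(y, ref, atol=1e-06))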
Liamkuo/SAIR
PixelNorm
false
17,574
[ "MIT" ]
6
0fb289cd975b5a196b58e7d16bac00e31fd41d39
https://github.com/Liamkuo/SAIR/tree/0fb289cd975b5a196b58e7d16bac00e31fd41d39
BCEFocalLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/n4/cn4l67wrwka2oqlbqovgswkunvp62yjnmtlpzhqpoe2wqbjmrkfj.py
# Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten.binary_cross_entropy_with_logits, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mul, aten.mean]
# Source node to ATen node mapping:
#   logp => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
#   loss => mul_1
#   mean => mean
#   neg => neg_1
#   p => exp_1
#   pow_1 => pow_1
#   sub => sub_3
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
#   %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
#   %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_2,), kwargs = {})
#   %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 0), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %sub_2), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp3 = tl.load(in_ptr1 + (r0), None)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp13 = -tmp12
    tmp14 = tl_math.exp(tmp13)
    tmp15 = tmp1 - tmp14
    tmp16 = tmp1 * tmp12
    tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
    tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
    tmp20 = 256.0
    tmp21 = tmp19 / tmp20
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten.binary_cross_entropy_with_logits, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mul, aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp13 = -tmp12
    tmp14 = tl_math.exp(tmp13)
    tmp1 - tmp14
    tmp16 = tmp1 * tmp12
    tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
    tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
    tmp20 = 256.0
    tmp21 = tmp19 / tmp20
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class BCEFocalLossNew(nn.Module):
    """Implementation of Focal Loss for Binary Classification Problems.

    Focal loss was proposed in `Focal Loss for Dense Object Detection_.
    <https://arxiv.org/abs/1708.02002>`_.
    """

    def __init__(self, gamma=0, eps=1e-07, reduction='mean'):
        """Constructor Method for FocalLoss class.

        Args:
            gamma : The focal parameter. Defaults to 0.
            eps : Constant for computational stability.
            reduction: The reduction parameter for Cross Entropy Loss.
        """
        super(BCEFocalLossNew, self).__init__()
        self.gamma = gamma
        self.reduction = reduction
        self.eps = eps
        self.bce = torch.nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
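Editor's note: a minimal usage sketch (not part of the original record). The compiled kernel bakes in the default gamma=0, so the focal weight (1 - p)**0 folds to 1 and the result is plain mean BCE-with-logits; the kernel's arithmetic treats the first argument as the target and the second as the logits:

if __name__ == '__main__':
    criterion = BCEFocalLossNew()
    targets = torch.rand(4, 4, 4, 4, device='cuda')
    logits = torch.randn(4, 4, 4, 4, device='cuda')
    loss = criterion(targets, logits)
    # With gamma=0 this should match the stock loss.
    ref = torch.nn.functional.binary_cross_entropy_with_logits(logits, targets)
    print(torch.allclose(loss, ref, atol=1e-06))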
Atharva-Phatak/torchflare
BCEFocalLoss
false
13,330
[ "Apache-2.0" ]
86
945f4bee73a855edd8cb19cd646731155499a27f
https://github.com/Atharva-Phatak/torchflare/tree/945f4bee73a855edd8cb19cd646731155499a27f
Sparsemax
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/nk/cnkmgeqruamtgccvbn4zkgty33cqveg7s4ow6q4qrojcnzzpb3wy.py
# Topologically Sorted Source Nodes: [max_1, X, sort, cumsum], Original ATen: [aten.max, aten.sub, aten.sort, aten.cumsum]
# Source node to ATen node mapping:
#   X => sub
#   cumsum => cumsum
#   max_1 => max_1
#   sort => sort
# Graph fragment:
#   %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %getitem), kwargs = {})
#   %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%sub, -1, True), kwargs = {})
#   %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%getitem_2, -1), kwargs = {})
triton_per_fused_cumsum_max_sort_sub_0 = async_compile.triton('triton_per_fused_cumsum_max_sort_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
    tmp0 = arg0_0 + arg1_0
    return tmp0

@triton_heuristics.persistent_reduction(
    size_hints=[64, 4],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_max_sort_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_max_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = r1
    tmp10 = tmp9.to(tl.int16)
    tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13, tmp14, = triton_helpers.sort_with_index(tmp11, tmp12, None, 1, stable=False, descending=True)
    tmp15 = tmp13.to(tl.float32)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0)
    tl.store(out_ptr0 + (r1 + (4*x0)), tmp8, xmask)
    tl.store(out_ptr1 + (r1 + (4*x0)), tmp13, xmask)
    tl.store(out_ptr2 + (r1 + (4*x0)), tmp17, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/uc/cuc2f2cgz6j7wqohouz3uu6ir7263uhsfcodyhv3lka5vxtu4ewc.py
# Topologically Sorted Source Nodes: [mul, topk_cumsum, support, sum_1], Original ATen: [aten.mul, aten.sub, aten.gt, aten.sum]
# Source node to ATen node mapping:
#   mul => mul_1
#   sum_1 => sum_1
#   support => gt
#   topk_cumsum => sub_1
# Graph fragment:
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %getitem_2), kwargs = {})
#   %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cumsum, 1), kwargs = {})
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%mul_1, %sub_1), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%gt, [-1]), kwargs = {})
triton_poi_fused_gt_mul_sub_sum_1 = async_compile.triton('triton_poi_fused_gt_mul_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp1 * tmp0
    tmp4 = tmp3 - tmp1
    tmp5 = tmp2 > tmp4
    tmp6 = tmp5.to(tl.int64)
    tmp8 = 2.0
    tmp9 = tmp8 * tmp7
    tmp11 = tmp10 - tmp1
    tmp12 = tmp9 > tmp11
    tmp13 = tmp12.to(tl.int64)
    tmp14 = tmp6 + tmp13
    tmp16 = 3.0
    tmp17 = tmp16 * tmp15
    tmp19 = tmp18 - tmp1
    tmp20 = tmp17 > tmp19
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp14 + tmp21
    tmp24 = 4.0
    tmp25 = tmp24 * tmp23
    tmp27 = tmp26 - tmp1
    tmp28 = tmp25 > tmp27
    tmp29 = tmp28.to(tl.int64)
    tmp30 = tmp22 + tmp29
    tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/v4/cv4frpxvm4rhejbwxhericho6gutfmihwrgomlmqytuxrptdu7cn.py
# Topologically Sorted Source Nodes: [topk_cumsum, sub_2, tau, tau_1, sub_3, output], Original ATen: [aten.sub, aten.gather, aten.div, aten.clamp]
# Source node to ATen node mapping:
#   output => clamp_min
#   sub_2 => sub_2
#   sub_3 => sub_3
#   tau => gather
#   tau_1 => div
#   topk_cumsum => sub_1
# Graph fragment:
#   %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cumsum, 1), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, 1), kwargs = {})
#   %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%sub_1, -1, %sub_2), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%gather, %unsqueeze), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %div), kwargs = {})
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
triton_poi_fused_clamp_div_gather_sub_2 = async_compile.triton('triton_poi_fused_clamp_div_gather_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_gather_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_gather_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.full([1], 1, tl.int64)
    tmp3 = tmp1 - tmp2
    tmp4 = tl.full([XBLOCK], 4, tl.int32)
    tmp5 = tmp3 + tmp4
    tmp6 = tmp3 < 0
    tmp7 = tl.where(tmp6, tmp5, tmp3)
    tl.device_assert(((0 <= tmp7) & (tmp7 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp7 < 4")
    tmp9 = tl.load(in_ptr2 + (tmp7 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp10 = 1.0
    tmp11 = tmp9 - tmp10
    tmp12 = tmp1.to(tl.float32)
    tmp13 = tmp11 / tmp12
    tmp14 = tmp0 - tmp13
    tmp15 = 0.0
    tmp16 = triton_helpers.maximum(tmp14, tmp15)
    tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [max_1, X, sort, cumsum], Original ATen: [aten.max, aten.sub, aten.sort, aten.cumsum]
        stream0 = get_raw_stream(0)
        triton_per_fused_cumsum_max_sort_sub_0.run(arg0_1, buf0, buf1, buf3, 64, 4, grid=grid(64), stream=stream0)
        del arg0_1
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
        # Topologically Sorted Source Nodes: [mul, topk_cumsum, support, sum_1], Original ATen: [aten.mul, aten.sub, aten.gt, aten.sum]
        triton_poi_fused_gt_mul_sub_sum_1.run(buf1, buf3, buf4, 64, grid=grid(64), stream=stream0)
        buf5 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [topk_cumsum, sub_2, tau, tau_1, sub_3, output], Original ATen: [aten.sub, aten.gather, aten.div, aten.clamp]
        triton_poi_fused_clamp_div_gather_sub_2.run(buf0, buf4, buf3, buf5, 256, grid=grid(256), stream=stream0)
        del buf0
        del buf3
        del buf4
    return (buf5, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.autograd import Function
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
    tmp0 = arg0_0 + arg1_0
    return tmp0


@triton.jit
def triton_per_fused_cumsum_max_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = r1
    tmp10 = tmp9.to(tl.int16)
    tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13, _tmp14 = triton_helpers.sort_with_index(tmp11, tmp12, None, 1, stable=False, descending=True)
    tmp15 = tmp13.to(tl.float32)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0)
    tl.store(out_ptr0 + (r1 + 4 * x0), tmp8, xmask)
    tl.store(out_ptr1 + (r1 + 4 * x0), tmp13, xmask)
    tl.store(out_ptr2 + (r1 + 4 * x0), tmp17, xmask)


@triton.jit
def triton_poi_fused_gt_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp1 * tmp0
    tmp4 = tmp3 - tmp1
    tmp5 = tmp2 > tmp4
    tmp6 = tmp5.to(tl.int64)
    tmp8 = 2.0
    tmp9 = tmp8 * tmp7
    tmp11 = tmp10 - tmp1
    tmp12 = tmp9 > tmp11
    tmp13 = tmp12.to(tl.int64)
    tmp14 = tmp6 + tmp13
    tmp16 = 3.0
    tmp17 = tmp16 * tmp15
    tmp19 = tmp18 - tmp1
    tmp20 = tmp17 > tmp19
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp14 + tmp21
    tmp24 = 4.0
    tmp25 = tmp24 * tmp23
    tmp27 = tmp26 - tmp1
    tmp28 = tmp25 > tmp27
    tmp29 = tmp28.to(tl.int64)
    tmp30 = tmp22 + tmp29
    tl.store(out_ptr0 + x0, tmp30, xmask)


@triton.jit
def triton_poi_fused_clamp_div_gather_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.full([1], 1, tl.int64)
    tmp3 = tmp1 - tmp2
    tmp4 = tl.full([XBLOCK], 4, tl.int32)
    tmp5 = tmp3 + tmp4
    tmp6 = tmp3 < 0
    tmp7 = tl.where(tmp6, tmp5, tmp3)
    tl.device_assert((0 <= tmp7) & (tmp7 < 4) | ~xmask, 'index out of bounds: 0 <= tmp7 < 4')
    tmp9 = tl.load(in_ptr2 + (tmp7 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = 1.0
    tmp11 = tmp9 - tmp10
    tmp12 = tmp1.to(tl.float32)
    tmp13 = tmp11 / tmp12
    tmp14 = tmp0 - tmp13
    tmp15 = 0.0
    tmp16 = triton_helpers.maximum(tmp14, tmp15)
    tl.store(out_ptr0 + x2, tmp16, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_cumsum_max_sort_sub_0[grid(64)](arg0_1, buf0, buf1, buf3, 64, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
        triton_poi_fused_gt_mul_sub_sum_1[grid(64)](buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = buf1
        del buf1
        triton_poi_fused_clamp_div_gather_sub_2[grid(256)](buf0, buf4, buf3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del buf3
        del buf4
    return buf5,


def _make_ix_like(X, dim):
    d = X.size(dim)
    rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype)
    view = [1] * X.dim()
    view[0] = -1
    return rho.view(view).transpose(0, dim)


def _roll_last(X, dim):
    if dim == -1:
        return X
    elif dim < 0:
        dim = X.dim() - dim
    perm = [i for i in range(X.dim()) if i != dim] + [dim]
    return X.permute(perm)


def _sparsemax_threshold_and_support(X, dim=-1, k=None):
    """Core computation for sparsemax: optimal threshold and support size.

    Parameters
    ----------
    X : torch.Tensor
        The input tensor to compute thresholds over.
    dim : int
        The dimension along which to apply sparsemax.
    k : int or None
        number of largest elements to partial-sort over. For optimal
        performance, should be slightly bigger than the expected number of
        nonzeros in the solution. If the solution is more than k-sparse,
        this function is recursively called with a 2*k schedule.
        If `None`, full sorting is performed from the beginning.

    Returns
    -------
    tau : torch.Tensor like `X`, with all but the `dim` dimension intact
        the threshold value for each vector
    support_size : torch LongTensor, shape like `tau`
        the number of nonzeros in each vector.
    """
    if k is None or k >= X.shape[dim]:
        topk, _ = torch.sort(X, dim=dim, descending=True)
    else:
        topk, _ = torch.topk(X, k=k, dim=dim)
    topk_cumsum = topk.cumsum(dim) - 1
    rhos = _make_ix_like(topk, dim)
    support = rhos * topk > topk_cumsum
    support_size = support.sum(dim=dim).unsqueeze(dim)
    tau = topk_cumsum.gather(dim, support_size - 1)
    tau /= support_size
    if k is not None and k < X.shape[dim]:
        unsolved = (support_size == k).squeeze(dim)
        if torch.any(unsolved):
            in_ = _roll_last(X, dim)[unsolved]
            tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k)
            _roll_last(tau, dim)[unsolved] = tau_
            _roll_last(support_size, dim)[unsolved] = ss_
    return tau, support_size


def sparsemax(X, dim=-1, k=None):
    """sparsemax: normalizing sparse transform (a la softmax).

    Solves the projection:

        min_p ||x - p||_2   s.t.    p >= 0, sum(p) == 1.

    Parameters
    ----------
    X : torch.Tensor
        The input tensor.
    dim : int
        The dimension along which to apply sparsemax.
    k : int or None
        number of largest elements to partial-sort over. For optimal
        performance, should be slightly bigger than the expected number of
        nonzeros in the solution. If the solution is more than k-sparse,
        this function is recursively called with a 2*k schedule.
        If `None`, full sorting is performed from the beginning.

    Returns
    -------
    P : torch tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 elementwise.
    """
    return SparsemaxFunction.apply(X, dim, k)


class SparsemaxFunction(Function):

    @classmethod
    def forward(cls, ctx, X, dim=-1, k=None):
        ctx.dim = dim
        max_val, _ = X.max(dim=dim, keepdim=True)
        X = X - max_val
        tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k)
        output = torch.clamp(X - tau, min=0)
        ctx.save_for_backward(supp_size, output)
        return output

    @classmethod
    def backward(cls, ctx, grad_output):
        supp_size, output = ctx.saved_tensors
        dim = ctx.dim
        grad_input = grad_output.clone()
        grad_input[output == 0] = 0
        v_hat = grad_input.sum(dim=dim) / supp_size.squeeze()
        v_hat = v_hat.unsqueeze(dim)
        grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
        return grad_input, None, None


class SparsemaxNew(nn.Module):

    def __init__(self, dim=-1, k=None):
        """sparsemax: normalizing sparse transform (a la softmax).

        Solves the projection:

            min_p ||x - p||_2   s.t.    p >= 0, sum(p) == 1.

        Parameters
        ----------
        dim : int
            The dimension along which to apply sparsemax.
        k : int or None
            number of largest elements to partial-sort over. For optimal
            performance, should be slightly bigger than the expected number of
            nonzeros in the solution. If the solution is more than k-sparse,
            this function is recursively called with a 2*k schedule.
            If `None`, full sorting is performed from the beginning.
        """
        self.dim = dim
        self.k = k
        super(SparsemaxNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
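Editor's note: a minimal usage sketch (not part of the original record). It assumes a CUDA device and the fixed 4x4x4x4 input shape the compiled wrapper asserts; the output rows along the last dim should be sparse probability vectors:

if __name__ == '__main__':
    sm = SparsemaxNew(dim=-1)
    x = torch.randn(4, 4, 4, 4, device='cuda')
    p = sm(x)
    # Non-negative entries that sum to 1 along the projected dimension.
    print(torch.all(p >= 0).item(), torch.allclose(p.sum(dim=-1), torch.ones(4, 4, 4, device='cuda')))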
gitlost-murali/awesome-align
Sparsemax
false
3,554
[ "BSD-3-Clause" ]
0
39fb45ca85a98e005447bddb52c48e65ce7d399b
https://github.com/gitlost-murali/awesome-align/tree/39fb45ca85a98e005447bddb52c48e65ce7d399b
Net
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, (3, 3))
        self.pool1 = nn.MaxPool2d((2, 2))
        self.conv2 = nn.Conv2d(32, 32, (3, 3))
        self.pool2 = nn.MaxPool2d((2, 2))
        self.conv3 = nn.Conv2d(32, 64, (3, 3))
        self.pool3 = nn.MaxPool2d((2, 2))
        self.fc1 = nn.Linear(17 * 17 * 64, 64)
        self.fc1_drop = nn.Dropout(0.5)
        self.fc2 = nn.Linear(64, 1)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = F.relu(self.conv3(x))
        x = self.pool3(x)
        x = x.view(-1, 17 * 17 * 64)
        x = F.relu(self.fc1(x))
        x = self.fc1_drop(x)
        return torch.sigmoid(self.fc2(x))


def get_inputs():
    return [torch.rand([4, 3, 288, 288])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.distributed

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 10469888
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 81796 % 32
    x0 = xindex % 81796
    x4 = xindex // 81796
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + (x0 + 81824 * x4), tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 2617472
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 143
    x1 = xindex // 143 % 143
    x2 = xindex // 20449
    x3 = xindex % 20449
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 572 * x1 + 81824 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 572 * x1 + 81824 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (286 + 2 * x0 + 572 * x1 + 81824 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (287 + 2 * x0 + 572 * x1 + 81824 * x2), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x3 + 20480 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x3 + 20480 * x2), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 2544768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 19881 % 32
    x0 = xindex % 19881
    x4 = xindex // 19881
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + (x0 + 19904 * x4), tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 627200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 70
    x1 = xindex // 70 % 70
    x2 = xindex // 4900
    x3 = xindex % 4900
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 282 * x1 + 19904 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 282 * x1 + 19904 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (141 + 2 * x0 + 282 * x1 + 19904 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (142 + 2 * x0 + 282 * x1 + 19904 * x2), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x3 + 4928 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x3 + 4992 * x2), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4624 % 64
    x0 = xindex % 4624
    x4 = xindex // 4624
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + (x0 + 4640 * x4), tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 295936
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 34
    x1 = xindex // 34 % 34
    x2 = xindex // 1156
    x3 = xindex % 1156
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 136 * x1 + 4640 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 136 * x1 + 4640 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (68 + 2 * x0 + 136 * x1 + 4640 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (69 + 2 * x0 + 136 * x1 + 4640 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + (x3 + 1280 * x2), tmp15, xmask)
    tl.store(out_ptr1 + (x3 + 1184 * x2), tmp16, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_view_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 295936
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 18496
    x1 = xindex // 18496
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (1184 * (x0 // 1156) + 18944 * x1 + x0 % 1156), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_sigmoid_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 3, 288, 288), (248832, 82944, 288, 1))
    assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (64, 18496), (18496, 1))
    assert_size_stride(primals_9, (64,), (1,))
    assert_size_stride(primals_10, (1, 64), (64, 1))
    assert_size_stride(primals_11, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 286, 286), (2617472, 81796, 286, 1))
        buf1 = empty_strided_cuda((4, 32, 286, 286), (2618368, 81824, 286, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(10469888)](buf0, primals_2, buf1, 10469888, XBLOCK=512, num_warps=8, num_stages=1)
        del buf0
        del primals_2
        buf2 = empty_strided_cuda((4, 32, 143, 143), (655360, 20480, 143, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 32, 143, 143), (655360, 20480, 143, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(2617472)](buf1, buf2, buf3, 2617472, XBLOCK=512, num_warps=8, num_stages=1)
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 32, 141, 141), (636192, 19881, 141, 1))
        buf5 = empty_strided_cuda((4, 32, 141, 141), (636928, 19904, 141, 1), torch.float32)
        triton_poi_fused_convolution_relu_2[grid(2544768)](buf4, primals_5, buf5, 2544768, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf4
        del primals_5
        buf6 = empty_strided_cuda((4, 32, 70, 70), (157696, 4928, 70, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 32, 70, 70), (159744, 4992, 70, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_3[grid(627200)](buf5, buf6, buf7, 627200, XBLOCK=1024, num_warps=4, num_stages=1)
        buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 64, 68, 68), (295936, 4624, 68, 1))
        buf9 = empty_strided_cuda((4, 64, 68, 68), (296960, 4640, 68, 1), torch.float32)
        triton_poi_fused_convolution_relu_4[grid(1183744)](buf8, primals_7, buf9, 1183744, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf8
        del primals_7
        buf10 = empty_strided_cuda((4, 64, 34, 34), (81920, 1280, 34, 1), torch.int8)
        buf11 = empty_strided_cuda((4, 64, 34, 34), (75776, 1184, 34, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_5[grid(295936)](buf9, buf10, buf11, 295936, XBLOCK=512, num_warps=8, num_stages=1)
        buf12 = empty_strided_cuda((16, 18496), (18496, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_view_6[grid(295936)](buf11, buf12, 295936, XBLOCK=512, num_warps=8, num_stages=1)
        del buf11
        buf13 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
        extern_kernels.mm(buf12, reinterpret_tensor(primals_8, (18496, 64), (1, 18496), 0), out=buf13)
        buf14 = buf13
        del buf13
        triton_poi_fused_relu_7[grid(1024)](buf14, primals_9, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_9
        buf15 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf14, reinterpret_tensor(primals_10, (64, 1), (1, 64), 0), out=buf15)
        buf16 = buf15
        del buf15
        triton_poi_fused_sigmoid_8[grid(16)](buf16, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_11
    return (buf16, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
            buf3, buf5, buf6, buf7, buf9, buf10, buf12, buf14, buf16,
            primals_10, primals_8)


class NetNew(nn.Module):

    def __init__(self):
        super(NetNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, (3, 3))
        self.pool1 = nn.MaxPool2d((2, 2))
        self.conv2 = nn.Conv2d(32, 32, (3, 3))
        self.pool2 = nn.MaxPool2d((2, 2))
        self.conv3 = nn.Conv2d(32, 64, (3, 3))
        self.pool3 = nn.MaxPool2d((2, 2))
        self.fc1 = nn.Linear(17 * 17 * 64, 64)
        self.fc1_drop = nn.Dropout(0.5)
        self.fc2 = nn.Linear(64, 1)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.fc1.weight
        primals_9 = self.fc1.bias
        primals_10 = self.fc2.weight
        primals_11 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
                       primals_5, primals_6, primals_7, primals_8,
                       primals_9, primals_10, primals_11])
        return output[0]
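Editor's note: a minimal usage sketch (not part of the original record). It assumes a CUDA device and the 4x3x288x288 input the wrapper asserts. Note a quirk inherited from the source network: 288x288 pools down to 34x34x64 = 73984 features per sample, but fc1 expects 17*17*64 = 18496, so the view(-1, 18496) folds the batch of 4 into 16 rows:

if __name__ == '__main__':
    net = NetNew().cuda()
    x = torch.rand(4, 3, 288, 288, device='cuda')
    with torch.no_grad():
        y = net(x)
    print(y.shape)  # torch.Size([16, 1])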
CSCfi/machine-learning-scripts
Net
false
13,484
[ "MIT" ]
59
005f9343fb703ca2b6b11b5c2369e19efcaa5f62
https://github.com/CSCfi/machine-learning-scripts/tree/005f9343fb703ca2b6b11b5c2369e19efcaa5f62
AffineTransform
import torch
from torch import nn


class FC(nn.Module):

    def __init__(self, n_dim_in, n_dim_out, equal_lr=True):
        super().__init__()
        norm_const = n_dim_in ** -0.5
        scale_init = 1 if equal_lr else norm_const
        self.scale_forward = norm_const if equal_lr else 1
        self.weight = nn.Parameter(scale_init * torch.randn(n_dim_out, n_dim_in))
        self.bias = nn.Parameter(torch.zeros(n_dim_out))

    def forward(self, x):
        return nn.functional.linear(x, self.scale_forward * self.weight, bias=self.bias)


class AffineTransform(nn.Module):

    def __init__(self, n_dim_w, n_feature_maps, equal_lr):
        super().__init__()
        self.fc = FC(n_dim_w, n_feature_maps, equal_lr=equal_lr)
        nn.init.ones_(self.fc.bias)

    def forward(self, w):
        return self.fc(w)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_dim_w': 4, 'n_feature_maps': 4, 'equal_lr': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del buf0
        del primals_2
    return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)


class FC(nn.Module):

    def __init__(self, n_dim_in, n_dim_out, equal_lr=True):
        super().__init__()
        norm_const = n_dim_in ** -0.5
        scale_init = 1 if equal_lr else norm_const
        self.scale_forward = norm_const if equal_lr else 1
        self.weight = nn.Parameter(scale_init * torch.randn(n_dim_out, n_dim_in))
        self.bias = nn.Parameter(torch.zeros(n_dim_out))

    def forward(self, x):
        return nn.functional.linear(x, self.scale_forward * self.weight, bias=self.bias)


class AffineTransformNew(nn.Module):

    def __init__(self, n_dim_w, n_feature_maps, equal_lr):
        super().__init__()
        self.fc = FC(n_dim_w, n_feature_maps, equal_lr=equal_lr)
        nn.init.ones_(self.fc.bias)

    def forward(self, input_0):
        primals_1 = self.fc.weight
        primals_2 = self.fc.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
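Editor's note: a minimal usage sketch (not part of the original record). The module must be moved to the GPU first, since the fused kernel and addmm run on cuda:0; note the kernel bakes in the equal-lr scale 4 ** -0.5 = 0.5 for the 4-dim case:

if __name__ == '__main__':
    affine = AffineTransformNew(n_dim_w=4, n_feature_maps=4, equal_lr=True).cuda()
    w = torch.rand(4, 4, 4, 4, device='cuda')
    y = affine(w)
    # Eager reference using the FC definition above.
    ref = nn.functional.linear(w, affine.fc.scale_forward * affine.fc.weight, affine.fc.bias)
    print(torch.allclose(y, ref, atol=1e-06))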
moritztng/stylegan2-pytorch
AffineTransform
false
4,028
[ "MIT" ]
0
8827eae2e76c54b7406b34b2d49563ae53b04001
https://github.com/moritztng/stylegan2-pytorch/tree/8827eae2e76c54b7406b34b2d49563ae53b04001
SimBasedLoss
import torch
from torch import nn
import torch.nn.functional as F


class SimBasedLoss(nn.Module):

    def __init__(self):
        super(SimBasedLoss, self).__init__()

    def forward(self, y_s, y_t):
        y_s = F.normalize(y_s, p=2, dim=1)
        y_t = F.normalize(y_t, p=2, dim=1)
        student_sims = torch.matmul(y_s, y_s.T)
        teacher_sims = torch.matmul(y_t, y_t.T)
        loss = F.mse_loss(student_sims, teacher_sims)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex % 4
    x2 = xindex // 4 % 4
    x3 = xindex // 16
    y0 = yindex
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * x1), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x4 + 64 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_per_fused_mse_loss_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clone_1[grid(4, 64)](buf0, buf1, 4, 64, XBLOCK=32, YBLOCK=4, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
        buf3 = buf1
        del buf1
        triton_poi_fused_div_0[grid(256)](arg1_1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg1_1
        buf4 = buf0
        del buf0
        triton_poi_fused_clone_1[grid(4, 64)](buf3, buf4, 4, 64, XBLOCK=32, YBLOCK=4, num_warps=4, num_stages=1)
        buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
        del buf3
        del buf4
        buf6 = empty_strided_cuda((), (), torch.float32)
        buf7 = buf6
        del buf6
        triton_per_fused_mse_loss_2[grid(1)](buf7, buf2, buf5, 1, 256, num_warps=2, num_stages=1)
        del buf2
        del buf5
    return buf7,


class SimBasedLossNew(nn.Module):

    def __init__(self):
        super(SimBasedLossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
kctsiolis/RepDistiller
SimBasedLoss
false
3,929
[ "BSD-2-Clause" ]
0
ce88f6e53fcf8ef81c5bac2d20ad31628dd279ac
https://github.com/kctsiolis/RepDistiller/tree/ce88f6e53fcf8ef81c5bac2d20ad31628dd279ac
TransposeConv2dLayer
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import Parameter


def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)


class LayerNorm(nn.Module):

    def __init__(self, num_features, eps=1e-08, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            self.gamma = Parameter(torch.Tensor(num_features).uniform_())
            self.beta = Parameter(torch.zeros(num_features))

    def forward(self, x):
        shape = [-1] + [1] * (x.dim() - 1)
        if x.size(0) == 1:
            mean = x.view(-1).mean().view(*shape)
            std = x.view(-1).std().view(*shape)
        else:
            mean = x.view(x.size(0), -1).mean(1).view(*shape)
            std = x.view(x.size(0), -1).std(1).view(*shape)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


class SpectralNorm(nn.Module):

    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        u = getattr(self.module, self.name + '_u')
        v = getattr(self.module, self.name + '_v')
        w = getattr(self.module, self.name + '_bar')
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data),
                u.data))
            u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        try:
            getattr(self.module, self.name + '_u')
            getattr(self.module, self.name + '_v')
            getattr(self.module, self.name + '_bar')
            return True
        except AttributeError:
            return False

    def _make_params(self):
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + '_u', u)
        self.module.register_parameter(self.name + '_v', v)
        self.module.register_parameter(self.name + '_bar', w_bar)

    def forward(self, *args):
        self._update_u_v()
        return self.module.forward(*args)


class Conv2dLayer(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, pad_type='zero', activation='elu', norm='none',
        sn=False):
        super(Conv2dLayer, self).__init__()
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, 'Unsupported padding type: {}'.format(pad_type)
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(out_channels)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(out_channels)
        elif norm == 'ln':
            self.norm = LayerNorm(out_channels)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, 'Unsupported normalization: {}'.format(norm)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'elu':
            self.activation = nn.ELU(inplace=True)
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, 'Unsupported activation: {}'.format(activation)
        if sn:
            self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels,
                kernel_size, stride, padding=0, dilation=dilation))
        else:
            self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
                stride, padding=0, dilation=dilation)

    def forward(self, x):
        x = self.pad(x)
        x = self.conv2d(x)
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x


class TransposeConv2dLayer(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
        'none', sn=False, scale_factor=2):
        super(TransposeConv2dLayer, self).__init__()
        self.scale_factor = scale_factor
        self.conv2d = Conv2dLayer(in_channels, out_channels, kernel_size,
            stride, padding, dilation, pad_type, activation, norm, sn)

    def forward(self, x):
        x = F.interpolate(x, scale_factor=self.scale_factor, mode='nearest',
            recompute_scale_factor=False)
        x = self.conv2d(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x2 = xindex // 64
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tmp5 = x0
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp6 * tmp2
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + x4, tmp9, xmask)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_1(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 25 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tmp8 = tmp7 > tmp3
    tl.store(in_out_ptr0 + x3, tmp7, xmask)
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
        triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_1[grid(400)
            ](buf2, primals_3, buf3, 400, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_3
    return buf2, primals_2, buf0, buf3


def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)


class LayerNorm(nn.Module):

    def __init__(self, num_features, eps=1e-08, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            self.gamma = Parameter(torch.Tensor(num_features).uniform_())
            self.beta = Parameter(torch.zeros(num_features))

    def forward(self, x):
        shape = [-1] + [1] * (x.dim() - 1)
        if x.size(0) == 1:
            mean = x.view(-1).mean().view(*shape)
            std = x.view(-1).std().view(*shape)
        else:
            mean = x.view(x.size(0), -1).mean(1).view(*shape)
            std = x.view(x.size(0), -1).std(1).view(*shape)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


class SpectralNorm(nn.Module):

    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        u = getattr(self.module, self.name + '_u')
        v = getattr(self.module, self.name + '_v')
        w = getattr(self.module, self.name + '_bar')
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data),
                u.data))
            u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        try:
            getattr(self.module, self.name + '_u')
            getattr(self.module, self.name + '_v')
            getattr(self.module, self.name + '_bar')
            return True
        except AttributeError:
            return False

    def _make_params(self):
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + '_u', u)
        self.module.register_parameter(self.name + '_v', v)
        self.module.register_parameter(self.name + '_bar', w_bar)

    def forward(self, *args):
        self._update_u_v()
        return self.module.forward(*args)


class Conv2dLayer(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, pad_type='zero', activation='elu', norm=
        'none', sn=False):
        super(Conv2dLayer, self).__init__()
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, 'Unsupported padding type: {}'.format(pad_type)
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(out_channels)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(out_channels)
        elif norm == 'ln':
            self.norm = LayerNorm(out_channels)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, 'Unsupported normalization: {}'.format(norm)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'elu':
            self.activation = nn.ELU(inplace=True)
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, 'Unsupported activation: {}'.format(activation)
        if sn:
            self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels,
                kernel_size, stride, padding=0, dilation=dilation))
        else:
            self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
                stride, padding=0, dilation=dilation)

    def forward(self, x):
        x = self.pad(x)
        x = self.conv2d(x)
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x


class TransposeConv2dLayerNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, pad_type='zero', activation='lrelu', norm=
        'none', sn=False, scale_factor=2):
        super(TransposeConv2dLayerNew, self).__init__()
        self.scale_factor = scale_factor
        self.conv2d = Conv2dLayer(in_channels, out_channels, kernel_size,
            stride, padding, dilation, pad_type, activation, norm, sn)

    def forward(self, input_0):
        primals_1 = self.conv2d.conv2d.weight
        primals_3 = self.conv2d.conv2d.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
autocomic/deepfillv2
TransposeConv2dLayer
false
12,140
[ "MIT" ]
0
4b0f565accbf20ee90093a4504b1cff0099d9cb9
https://github.com/autocomic/deepfillv2/tree/4b0f565accbf20ee90093a4504b1cff0099d9cb9
Attention
import torch
from torch import nn
import torch.nn.utils


class Attention(nn.Module):

    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        self.hidden_dim = hidden_dim
        self.ff = nn.Linear(in_features=hidden_dim, out_features=1)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, contexts, context_masks=None):
        """
        :param contexts: (batch_size, seq_len, n_hid)
        :param context_masks: (batch_size, seq_len)
        :return: (batch_size, n_hid), (batch_size, seq_len)
        """
        out = self.ff(contexts)
        out = out.view(contexts.size(0), contexts.size(1))
        if context_masks is not None:
            masked_out = out.masked_fill(context_masks, float('-inf'))
        else:
            masked_out = out
        attn_weights = self.softmax(masked_out)
        out = attn_weights.unsqueeze(1).bmm(contexts)
        out = out.squeeze(1)
        return out, attn_weights


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_1
        del primals_2
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
        del buf1
        triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1),
            0), primals_3, out=buf4)
    return reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf3, primals_3, buf3


class AttentionNew(nn.Module):

    def __init__(self, hidden_dim):
        super(AttentionNew, self).__init__()
        self.hidden_dim = hidden_dim
        self.ff = nn.Linear(in_features=hidden_dim, out_features=1)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, input_0):
        primals_1 = self.ff.weight
        primals_2 = self.ff.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
bstee615/ReVeal
Attention
false
14,982
[ "MIT" ]
63
fc22d0d54a3a23d4e0bc45a249b7eea22749685e
https://github.com/bstee615/ReVeal/tree/fc22d0d54a3a23d4e0bc45a249b7eea22749685e
FFModule
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_1/inductor_cache/bm/cbmvwkhgioz63mnhrh3onxemouh4axyclce6ay7mypmzm62glj7h.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
#   x => add, rsqrt, var_mean
# Graph fragment:
#   %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
    tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/gn/cgn3tpasui6fv3xxba47jzqip7bgipyrz4akedry64e2fx5k4rvd.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
#   x => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
#   %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/p3/cp3utc5v23sn2cyk7duypeqnubya7xxdpwoonppg3f7ngk5462gb.py
# Topologically Sorted Source Nodes: [sigmoid, x_2], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
#   sigmoid => sigmoid
#   x_2 => mul_2
# Graph fragment:
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (4, ), (1, ))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
        stream0 = get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
        triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
        del buf0
        del buf1
        del primals_1
        del primals_2
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_5
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [sigmoid, x_2], Original ATen: [aten.sigmoid, aten.mul]
        triton_poi_fused_mul_sigmoid_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
        del primals_7
    return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6, primals_4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
            buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
            buf1, primals_1, primals_2, buf2, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf0
        del buf1
        del primals_1
        del primals_2
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_5
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_sigmoid_2[grid(256)](buf3, buf4, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf5)
        del primals_7
    return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
        ), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0
        ), primals_6, primals_4


def swish(x):
    return x * torch.sigmoid(x)


class FFModuleNew(nn.Module):

    def __init__(self, d_model, h_size, dropout=0.2):
        super(FFModuleNew, self).__init__()
        self.layer_norm = nn.LayerNorm(d_model)
        self.layer1 = nn.Linear(d_model, h_size)
        self.swish_activation = swish
        self.dropout = nn.Dropout(dropout)
        self.layer2 = nn.Linear(h_size, d_model)

    def forward(self, input_0):
        primals_1 = self.layer_norm.weight
        primals_2 = self.layer_norm.bias
        primals_4 = self.layer1.weight
        primals_5 = self.layer1.bias
        primals_6 = self.layer2.weight
        primals_7 = self.layer2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
Masao-Someki/Conformer
FFModule
false
8,529
[ "MIT" ]
18
866da9ae05a6d07304775c592caac8d516f67c92
https://github.com/Masao-Someki/Conformer/tree/866da9ae05a6d07304775c592caac8d516f67c92
CircleLoss
import torch
from torch import Tensor
from torch import nn


class CircleLoss(nn.Module):

    def __init__(self, m: 'float', gamma: 'float') ->None:
        super(CircleLoss, self).__init__()
        self.m = m
        self.gamma = gamma
        self.soft_plus = nn.Softplus()

    def forward(self, sp: 'Tensor', sn: 'Tensor') ->Tensor:
        ap = torch.clamp_min(-sp.detach() + 1 + self.m, min=0.0)
        an = torch.clamp_min(sn.detach() + self.m, min=0.0)
        delta_p = 1 - self.m
        delta_n = self.m
        logit_p = -ap * (sp - delta_p) * self.gamma
        logit_n = an * (sn - delta_n) * self.gamma
        loss = self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.
            logsumexp(logit_p, dim=0))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'m': 4, 'gamma': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0(
    in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp8 = tl.load(in_ptr0 + (64 + x0), xmask)
    tmp15 = tl.load(in_ptr0 + (128 + x0), xmask)
    tmp22 = tl.load(in_ptr0 + (192 + x0), xmask)
    tmp44 = tl.load(in_ptr1 + x0, xmask)
    tmp55 = tl.load(in_ptr1 + (64 + x0), xmask)
    tmp65 = tl.load(in_ptr1 + (128 + x0), xmask)
    tmp75 = tl.load(in_ptr1 + (192 + x0), xmask)
    tmp1 = 4.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = tmp0 - tmp1
    tmp6 = tmp4 * tmp5
    tmp7 = tmp6 * tmp1
    tmp9 = tmp8 + tmp1
    tmp10 = triton_helpers.maximum(tmp9, tmp3)
    tmp11 = tmp8 - tmp1
    tmp12 = tmp10 * tmp11
    tmp13 = tmp12 * tmp1
    tmp14 = triton_helpers.maximum(tmp7, tmp13)
    tmp16 = tmp15 + tmp1
    tmp17 = triton_helpers.maximum(tmp16, tmp3)
    tmp18 = tmp15 - tmp1
    tmp19 = tmp17 * tmp18
    tmp20 = tmp19 * tmp1
    tmp21 = triton_helpers.maximum(tmp14, tmp20)
    tmp23 = tmp22 + tmp1
    tmp24 = triton_helpers.maximum(tmp23, tmp3)
    tmp25 = tmp22 - tmp1
    tmp26 = tmp24 * tmp25
    tmp27 = tmp26 * tmp1
    tmp28 = triton_helpers.maximum(tmp21, tmp27)
    tmp29 = tl_math.abs(tmp28)
    tmp30 = float('inf')
    tmp31 = tmp29 == tmp30
    tmp32 = tl.where(tmp31, tmp3, tmp28)
    tmp33 = tmp7 - tmp32
    tmp34 = tl_math.exp(tmp33)
    tmp35 = tmp13 - tmp32
    tmp36 = tl_math.exp(tmp35)
    tmp37 = tmp34 + tmp36
    tmp38 = tmp20 - tmp32
    tmp39 = tl_math.exp(tmp38)
    tmp40 = tmp37 + tmp39
    tmp41 = tmp27 - tmp32
    tmp42 = tl_math.exp(tmp41)
    tmp43 = tmp40 + tmp42
    tmp45 = -tmp44
    tmp46 = 1.0
    tmp47 = tmp45 + tmp46
    tmp48 = tmp47 + tmp1
    tmp49 = triton_helpers.maximum(tmp48, tmp3)
    tmp50 = -tmp49
    tmp51 = -3.0
    tmp52 = tmp44 - tmp51
    tmp53 = tmp50 * tmp52
    tmp54 = tmp53 * tmp1
    tmp56 = -tmp55
    tmp57 = tmp56 + tmp46
    tmp58 = tmp57 + tmp1
    tmp59 = triton_helpers.maximum(tmp58, tmp3)
    tmp60 = -tmp59
    tmp61 = tmp55 - tmp51
    tmp62 = tmp60 * tmp61
    tmp63 = tmp62 * tmp1
    tmp64 = triton_helpers.maximum(tmp54, tmp63)
    tmp66 = -tmp65
    tmp67 = tmp66 + tmp46
    tmp68 = tmp67 + tmp1
    tmp69 = triton_helpers.maximum(tmp68, tmp3)
    tmp70 = -tmp69
    tmp71 = tmp65 - tmp51
    tmp72 = tmp70 * tmp71
    tmp73 = tmp72 * tmp1
    tmp74 = triton_helpers.maximum(tmp64, tmp73)
    tmp76 = -tmp75
    tmp77 = tmp76 + tmp46
    tmp78 = tmp77 + tmp1
    tmp79 = triton_helpers.maximum(tmp78, tmp3)
    tmp80 = -tmp79
    tmp81 = tmp75 - tmp51
    tmp82 = tmp80 * tmp81
    tmp83 = tmp82 * tmp1
    tmp84 = triton_helpers.maximum(tmp74, tmp83)
    tmp85 = tl_math.abs(tmp84)
    tmp86 = tmp85 == tmp30
    tmp87 = tl.where(tmp86, tmp3, tmp84)
    tmp88 = tmp54 - tmp87
    tmp89 = tl_math.exp(tmp88)
    tmp90 = tmp63 - tmp87
    tmp91 = tl_math.exp(tmp90)
    tmp92 = tmp89 + tmp91
    tmp93 = tmp73 - tmp87
    tmp94 = tl_math.exp(tmp93)
    tmp95 = tmp92 + tmp94
    tmp96 = tmp83 - tmp87
    tmp97 = tl_math.exp(tmp96)
    tmp98 = tmp95 + tmp97
    tmp99 = tl_math.log(tmp43)
    tmp100 = tmp99 + tmp32
    tmp101 = tl_math.log(tmp98)
    tmp102 = tmp101 + tmp87
    tmp103 = tmp100 + tmp102
    tmp104 = tmp103 * tmp46
    tmp105 = 20.0
    tmp106 = tmp104 > tmp105
    tmp107 = tl_math.exp(tmp104)
    tmp108 = libdevice.log1p(tmp107)
    tmp109 = tmp108 * tmp46
    tmp110 = tl.where(tmp106, tmp103, tmp109)
    tl.store(in_out_ptr0 + x0, tmp110, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf5 = buf2
        del buf2
        get_raw_stream(0)
        triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0[grid
            (64)](buf5, arg1_1, arg0_1, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del arg0_1
        del arg1_1
    return buf5,


class CircleLossNew(nn.Module):

    def __init__(self, m: 'float', gamma: 'float') ->None:
        super(CircleLossNew, self).__init__()
        self.m = m
        self.gamma = gamma
        self.soft_plus = nn.Softplus()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
fabiozappo/Person_reID_tensorrt
CircleLoss
false
6,675
[ "Apache-2.0" ]
1
164441f35777698274e7664a9aefcc8d54467dc3
https://github.com/fabiozappo/Person_reID_tensorrt/tree/164441f35777698274e7664a9aefcc8d54467dc3
MSE
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_6/inductor_cache/6a/c6at7k6mlvvyb6bhxconu56vmlljdfsi5u47zlmh7d3irv3tnmhl.py
# Topologically Sorted Source Nodes: [neg, diffs, pow_1, sum_1, mse], Original ATen: [aten.neg, aten.add, aten.pow, aten.sum, aten.div]
# Source node to ATen node mapping:
#   diffs => add
#   mse => div
#   neg => neg
#   pow_1 => pow_1
#   sum_1 => sum_1
# Graph fragment:
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 256), kwargs = {})
triton_per_fused_add_div_neg_pow_sum_0 = async_compile.triton('triton_per_fused_add_div_neg_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_neg_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tl.load(in_ptr1 + (r0), None)
    tmp2 = -tmp1
    tmp3 = tmp0 + tmp2
    tmp4 = tmp3 * tmp3
    tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
    tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
    tmp8 = 0.00390625
    tmp9 = tmp7 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [neg, diffs, pow_1, sum_1, mse], Original ATen: [aten.neg, aten.add, aten.pow, aten.sum, aten.div]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_div_neg_pow_sum_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = -tmp1
    tmp3 = tmp0 + tmp2
    tmp4 = tmp3 * tmp3
    tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
    tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
    tmp8 = 0.00390625
    tmp9 = tmp7 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1,
            arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class MSENew(nn.Module):

    def __init__(self):
        super(MSENew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Clement25/Multimodal-Attack
MSE
false
285
[ "MIT" ]
0
bd04ee099d457e87b6e6ee918c03f65a589bcb9a
https://github.com/Clement25/Multimodal-Attack/tree/bd04ee099d457e87b6e6ee918c03f65a589bcb9a
Transition
import torch
import torch.nn as nn
import torch.nn.functional as F


class Transition(nn.Module):

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True)

    def forward(self, x):
        out = self.conv(F.relu(x))
        out = F.avg_pool2d(out, 2)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_planes': 4, 'out_planes': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
        buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused_avg_pool2d_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
    return buf3, primals_2, buf0, buf2


class TransitionNew(nn.Module):

    def __init__(self, in_planes, out_planes):
        super(TransitionNew, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Mahoumaru/auto_LiRPA
Transition
false
11,673
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
BiaffineScorer
import torch
import torch.nn as nn


class BiaffineScorer(nn.Module):

    def __init__(self, input1_size, input2_size, output_size):
        super().__init__()
        self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
            output_size)
        self.W_bilin.weight.data.zero_()
        self.W_bilin.bias.data.zero_()

    def forward(self, input1, input2):
        input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)
            ], len(input1.size()) - 1)
        input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)
            ], len(input2.size()) - 1)
        return self.W_bilin(input1, input2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input1_size': 4, 'input2_size': 4, 'output_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 320
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 5, tl.int64)
    tmp9 = 1.0
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp6, tmp9, tmp10)
    tmp12 = tl.where(tmp4, tmp5, tmp11)
    tl.store(out_ptr0 + x2, tmp12, xmask)


@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 5, 5), (25, 5, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(320)](primals_1, buf0, 320, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        triton_poi_fused_cat_0[grid(320)](primals_2, buf1, 320, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_2
        buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (
            64, 5), (5, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 5),
            (5, 1), 0), [1, 3], [0], [1, 2], [2, 3])
        del primals_3
        buf3 = buf2
        del buf2
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf3
        triton_poi_fused_add_1[grid(256)](buf4, primals_4, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_4
    return buf4, reinterpret_tensor(buf0, (64, 5), (5, 1), 0
        ), reinterpret_tensor(buf1, (64, 5), (5, 1), 0)


class BiaffineScorerNew(nn.Module):

    def __init__(self, input1_size, input2_size, output_size):
        super().__init__()
        self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
            output_size)
        self.W_bilin.weight.data.zero_()
        self.W_bilin.bias.data.zero_()

    def forward(self, input_0, input_1):
        primals_3 = self.W_bilin.weight
        primals_4 = self.W_bilin.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
NLPInBLCU/BiaffineDependencyParsing
BiaffineScorer
false
14,074
[ "MIT" ]
67
40b133648c747957dacd59916add0403371fe680
https://github.com/NLPInBLCU/BiaffineDependencyParsing/tree/40b133648c747957dacd59916add0403371fe680
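Usage sketch (an editorial addition, not part of the dataset entry): the compiled wrapper is a drop-in replacement for BiaffineScorer with the sizes from get_init_inputs above. A CUDA device is assumed, since call() pins all buffers to device 0.

import torch

scorer = BiaffineScorerNew(input1_size=4, input2_size=4, output_size=4).cuda()
x1 = torch.rand(4, 4, 4, 4, device='cuda')
x2 = torch.rand(4, 4, 4, 4, device='cuda')
out = scorer(x1, x2)  # (4, 4, 4, 4); all zeros here because weight and bias are zero-initialized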
SelfAttention
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   multi_head_attention_forward => mul
# Graph fragment:
#   %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   multi_head_attention_forward => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   multi_head_attention_forward => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   multi_head_attention_forward => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4, 4],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
    tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (12, 4), (4, 1))
    assert_size_stride(primals_3, (12, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
        del primals_2
        buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
        del primals_3
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
        buf6 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
        del buf5
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
        triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
        buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7  # reuse
        # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
        del primals_5
    return (buf9, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (12, 4), (4, 1))
    assert_size_stride(primals_3, (12,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
            (1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
            alpha=1, beta=1, out=buf1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
            alpha=1, beta=1, out=buf2)
        del primals_2
        buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_3
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
            4), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = buf4
        del buf4
        triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf5
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
            1), 0), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
        del buf7
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
            1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
            =1, beta=1, out=buf9)
        del primals_5
    return buf9, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0
        ), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0
        ), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
        ), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)


class SelfAttentionNew(nn.Module):

    def __init__(self, *args, **kargs):
        super().__init__()
        self.attention = nn.MultiheadAttention(*args, **kargs)

    def forward(self, input_0):
        # In the traced graph above, primals_1 is the attention input (it feeds the
        # first in-projection mm) and primals_4 is out_proj.weight (it is the weight
        # of the final addmm), so they are mapped accordingly here.
        primals_1 = input_0
        primals_2 = self.attention.in_proj_weight
        primals_3 = self.attention.in_proj_bias
        primals_4 = self.attention.out_proj.weight
        primals_5 = self.attention.out_proj.bias
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
eitin-infant/FinRL-Meta
SelfAttention
false
15,295
[ "MIT" ]
214
4c94011e58425796e7e2e5c1bf848afd65c828d6
https://github.com/eitin-infant/FinRL-Meta/tree/4c94011e58425796e7e2e5c1bf848afd65c828d6
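Usage sketch (an editorial addition, not part of the dataset entry): the compiled artifact accepts a single (4, 4) tensor, per the shape asserts in call(). embed_dim=4 follows from the weight shapes; num_heads=4 is an assumption consistent with the 1.0 softmax scaling (head_dim 1) in triton_poi_fused_mul_0.

import torch

attn = SelfAttentionNew(4, 4).cuda()    # embed_dim=4; num_heads=4 is an assumption (head_dim 1)
x = torch.rand(4, 4, device='cuda')     # matches the (4, 4) input assert in call()
y = attn(x)                             # (4, 4) attention output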
FixupBasicBlock
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/pb/cpbshtrpnaucbs7hqoiqkvsndkguzk4er52bxaovgoqcnlnyqv6v.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   add => add
# Graph fragment:
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/ii/ciidyz7k62ig6agzsjgqpv736t42hwampit52unv6yf3hsf7nm72.py
# Topologically Sorted Source Nodes: [add_1, out_1, add_2], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   add_1 => add_1
#   add_2 => add_2
#   out_1 => relu
# Graph fragment:
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_4), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
#   %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_5), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp6 = tl.load(in_ptr2 + (0))
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp8 = tmp5 + tmp7
    tmp9 = 0.0
    tmp10 = tmp5 <= tmp9
    tl.store(out_ptr0 + (x0), tmp8, xmask)
    tl.store(out_ptr1 + (x0), tmp10, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/ru/cruzz4bw56op4a2umcojvqo7x3yllo524t6ildzprzx27ua6kcm5.py
# Topologically Sorted Source Nodes: [mul, out_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   mul => mul
#   out_3 => add_3
#   out_4 => add_4
#   out_5 => relu_1
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %primals_7), kwargs = {})
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_8), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_1), kwargs = {})
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_mul_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_add_mul_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + (0))
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp7 = tl.load(in_ptr3 + (x0), xmask)
    tmp3 = tmp0 * tmp2
    tmp6 = tmp3 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp11 = 0.0
    tmp12 = tmp10 <= tmp11
    tl.store(out_ptr0 + (x0), tmp10, xmask)
    tl.store(out_ptr1 + (x0), tmp12, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_4, (1, ), (1, ))
    assert_size_stride(primals_5, (1, ), (1, ))
    assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_7, (1, ), (1, ))
    assert_size_stride(primals_8, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [add_1, out_1, add_2], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
        triton_poi_fused_add_relu_threshold_backward_1.run(buf1, primals_4, primals_5, buf2, buf6, 256, grid=grid(256), stream=stream0)
        del primals_4
        del primals_5
        # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
        buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = buf1; del buf1  # reuse
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [mul, out_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward]
        triton_poi_fused_add_mul_relu_threshold_backward_2.run(buf3, primals_7, primals_8, primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
        del primals_1
        del primals_8
    return (buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp6 = tl.load(in_ptr2 + 0)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp8 = tmp5 + tmp7
    tmp9 = 0.0
    tmp10 = tmp5 <= tmp9
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp10, xmask)


@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp7 = tl.load(in_ptr3 + x0, xmask)
    tmp3 = tmp0 * tmp2
    tmp6 = tmp3 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp11 = 0.0
    tmp12 = tmp10 <= tmp11
    tl.store(out_ptr0 + x0, tmp10, xmask)
    tl.store(out_ptr1 + x0, tmp12, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_4, (1,), (1,))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_7, (1,), (1,))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf1,
            primals_4, primals_5, buf2, buf6, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_4
        del primals_5
        buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = buf1
        del buf1
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_mul_relu_threshold_backward_2[grid(256)](buf3,
            primals_7, primals_8, primals_1, buf4, buf5, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        del primals_8
    return buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=1, bias=False)


class FixupBasicBlockNew(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(FixupBasicBlockNew, self).__init__()
        self.bias1a = nn.Parameter(torch.zeros(1))
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bias1b = nn.Parameter(torch.zeros(1))
        self.relu = nn.ReLU(inplace=True)
        self.bias2a = nn.Parameter(torch.zeros(1))
        self.conv2 = conv3x3(planes, planes)
        self.scale = nn.Parameter(torch.ones(1))
        self.bias2b = nn.Parameter(torch.zeros(1))
        self.downsample = downsample
        self.stride = stride

    def forward(self, input_0):
        primals_2 = self.bias1a
        primals_4 = self.bias1b
        primals_5 = self.bias2a
        primals_7 = self.scale
        primals_8 = self.bias2b
        primals_3 = self.conv1.weight
        primals_6 = self.conv2.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
bethgelab/robustness
FixupBasicBlock
false
14,954
[ "Apache-2.0" ]
67
aa0a6798fe3973bae5f47561721b59b39f126ab7
https://github.com/bethgelab/robustness/tree/aa0a6798fe3973bae5f47561721b59b39f126ab7
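Usage sketch (an editorial addition, not part of the dataset entry): the compiled call() has no downsample branch, so the residual add requires inplanes == planes and stride 1; shapes follow the (4, 4, 4, 4) asserts.

import torch

block = FixupBasicBlockNew(inplanes=4, planes=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')  # (N, C, H, W)
y = block(x)  # (4, 4, 4, 4): relu(scale * conv2(relu(conv1(x + bias1a) + bias1b) + bias2a) + bias2b + x)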
MNACLayer
import collections
import math
import torch
import torch.utils.data


def sparsity_error(W):
    W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
    return torch.max(W_error)


def mnac(x, W, mode='prod'):
    out_size, in_size = W.size()
    x = x.view(x.size()[0], in_size, 1)
    W = W.t().view(1, in_size, out_size)
    if mode == 'prod':
        return torch.prod(x * W + 1 - W, -2)
    elif mode == 'exp-log':
        return torch.exp(torch.sum(torch.log(x * W + 1 - W), -2))
    elif mode == 'no-idendity':
        return torch.prod(x * W, -2)
    else:
        raise ValueError(f'mnac mode "{mode}" is not implemented')


class SummaryWriterNamespaceNoLoggingScope:

    def __init__(self, writer):
        self._writer = writer

    def __enter__(self):
        self._writer._logging_enabled = False

    def __exit__(self, type, value, traceback):
        self._writer._logging_enabled = True
        return False


class DummySummaryWriter:

    def __init__(self, **kwargs):
        self._logging_enabled = False
        pass

    def add_scalar(self, name, value, verbose_only=True):
        pass

    def add_summary(self, name, tensor, verbose_only=True):
        pass

    def add_histogram(self, name, tensor, verbose_only=True):
        pass

    def add_tensor(self, name, tensor, verbose_only=True):
        pass

    def print(self, name, tensor, verbose_only=True):
        pass

    def namespace(self, name):
        return self

    def every(self, epoch_interval):
        return self

    def verbose(self, verbose):
        return self

    def no_logging(self):
        return SummaryWriterNamespaceNoLoggingScope(self)


class NoRandomScope:

    def __init__(self, module):
        self._module = module

    def __enter__(self):
        self._module._disable_random()

    def __exit__(self, type, value, traceback):
        self._module._enable_random()
        return False


class ExtendedTorchModule(torch.nn.Module):

    def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
        super().__init__()
        if writer is None:
            writer = DummySummaryWriter()
        self.writer = writer.namespace(default_name if name is None else name)
        self.allow_random = True

    def set_parameter(self, name, value):
        parameter = getattr(self, name, None)
        if isinstance(parameter, torch.nn.Parameter):
            parameter.fill_(value)
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module.set_parameter(name, value)

    def regualizer(self, merge_in=None):
        regualizers = collections.defaultdict(int)
        if merge_in is not None:
            for key, value in merge_in.items():
                self.writer.add_scalar(f'regualizer/{key}', value)
                regualizers[key] += value
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                for key, value in module.regualizer().items():
                    regualizers[key] += value
        return regualizers

    def optimize(self, loss):
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module.optimize(loss)

    def log_gradients(self):
        for name, parameter in self.named_parameters(recurse=False):
            if parameter.requires_grad:
                gradient, *_ = parameter.grad.data
                self.writer.add_summary(f'{name}/grad', gradient)
                self.writer.add_histogram(f'{name}/grad', gradient)
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module.log_gradients()

    def no_internal_logging(self):
        return self.writer.no_logging()

    def _disable_random(self):
        self.allow_random = False
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module._disable_random()

    def _enable_random(self):
        self.allow_random = True
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module._enable_random()

    def no_random(self):
        return NoRandomScope(self)


class MNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator)

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features,
            in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        std = math.sqrt(0.25)
        r = math.sqrt(3.0) * std
        torch.nn.init.uniform_(self.W_hat, -r, r)

    def forward(self, x, reuse=False):
        W = torch.sigmoid(self.W_hat)
        self.writer.add_histogram('W', W)
        self.writer.add_tensor('W', W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(W),
            verbose_only=False)
        return mnac(x, W)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(self.in_features,
            self.out_features)


def get_inputs():
    return [torch.rand([4, 4, 1])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import collections
import math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex % 16
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tmp4 = 1.0
    tmp5 = tmp3 + tmp4
    tmp6 = tmp5 - tmp2
    tl.store(out_ptr0 + x4, tmp6, xmask)


@triton.jit
def triton_poi_fused_prod_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_sub_0[grid(64)](primals_2, primals_1,
            buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_prod_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
    return buf1, primals_1, primals_2, buf0


def sparsity_error(W):
    W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
    return torch.max(W_error)


def mnac(x, W, mode='prod'):
    out_size, in_size = W.size()
    x = x.view(x.size()[0], in_size, 1)
    W = W.t().view(1, in_size, out_size)
    if mode == 'prod':
        return torch.prod(x * W + 1 - W, -2)
    elif mode == 'exp-log':
        return torch.exp(torch.sum(torch.log(x * W + 1 - W), -2))
    elif mode == 'no-idendity':
        return torch.prod(x * W, -2)
    else:
        raise ValueError(f'mnac mode "{mode}" is not implemented')


class SummaryWriterNamespaceNoLoggingScope:

    def __init__(self, writer):
        self._writer = writer

    def __enter__(self):
        self._writer._logging_enabled = False

    def __exit__(self, type, value, traceback):
        self._writer._logging_enabled = True
        return False


class DummySummaryWriter:

    def __init__(self, **kwargs):
        self._logging_enabled = False
        pass

    def add_scalar(self, name, value, verbose_only=True):
        pass

    def add_summary(self, name, tensor, verbose_only=True):
        pass

    def add_histogram(self, name, tensor, verbose_only=True):
        pass

    def add_tensor(self, name, tensor, verbose_only=True):
        pass

    def print(self, name, tensor, verbose_only=True):
        pass

    def namespace(self, name):
        return self

    def every(self, epoch_interval):
        return self

    def verbose(self, verbose):
        return self

    def no_logging(self):
        return SummaryWriterNamespaceNoLoggingScope(self)


class NoRandomScope:

    def __init__(self, module):
        self._module = module

    def __enter__(self):
        self._module._disable_random()

    def __exit__(self, type, value, traceback):
        self._module._enable_random()
        return False


class ExtendedTorchModule(torch.nn.Module):

    def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
        super().__init__()
        if writer is None:
            writer = DummySummaryWriter()
        self.writer = writer.namespace(default_name if name is None else name)
        self.allow_random = True

    def set_parameter(self, name, value):
        parameter = getattr(self, name, None)
        if isinstance(parameter, torch.nn.Parameter):
            parameter.fill_(value)
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module.set_parameter(name, value)

    def regualizer(self, merge_in=None):
        regualizers = collections.defaultdict(int)
        if merge_in is not None:
            for key, value in merge_in.items():
                self.writer.add_scalar(f'regualizer/{key}', value)
                regualizers[key] += value
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                for key, value in module.regualizer().items():
                    regualizers[key] += value
        return regualizers

    def optimize(self, loss):
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module.optimize(loss)

    def log_gradients(self):
        for name, parameter in self.named_parameters(recurse=False):
            if parameter.requires_grad:
                gradient, *_ = parameter.grad.data
                self.writer.add_summary(f'{name}/grad', gradient)
                self.writer.add_histogram(f'{name}/grad', gradient)
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module.log_gradients()

    def no_internal_logging(self):
        return self.writer.no_logging()

    def _disable_random(self):
        self.allow_random = False
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module._disable_random()

    def _enable_random(self):
        self.allow_random = True
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module._enable_random()

    def no_random(self):
        return NoRandomScope(self)


class MNACLayerNew(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator)

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features,
            in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        std = math.sqrt(0.25)
        r = math.sqrt(3.0) * std
        torch.nn.init.uniform_(self.W_hat, -r, r)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(self.in_features,
            self.out_features)

    def forward(self, input_0):
        primals_1 = self.W_hat
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
wlm2019/Neural-Arithmetic-Units
MNACLayer
false
16,716
[ "MIT" ]
147
f9de9d004bb2dc2ee28577cd1760d0a00c185836
https://github.com/wlm2019/Neural-Arithmetic-Units/tree/f9de9d004bb2dc2ee28577cd1760d0a00c185836
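Usage sketch (an editorial addition, not part of the dataset entry): get_inputs above fixes the input shape to (4, 4, 1), i.e. batch 4 with in_features=4. Note that neither version calls reset_parameters() from __init__, so W_hat is allocated uninitialized and should be seeded explicitly before use.

import torch

layer = MNACLayerNew(in_features=4, out_features=4).cuda()
layer.reset_parameters()  # W_hat is uninitialized memory until this call
x = torch.rand(4, 4, 1, device='cuda')
y = layer(x)  # (4, 4): per-output product of (x * sigmoid(W_hat) + 1 - sigmoid(W_hat))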
HardAttn
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
#   avg_pool2d => avg_pool2d
# Graph fragment:
#   %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp18 = tmp17 + tmp16
    tmp20 = tmp19 + tmp18
    tmp22 = tmp21 + tmp20
    tmp24 = tmp23 + tmp22
    tmp26 = tmp25 + tmp24
    tmp28 = tmp27 + tmp26
    tmp30 = tmp29 + tmp28
    tmp31 = 0.0625
    tmp32 = tmp30 * tmp31
    tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/ow/cowjexxxal2b7gsm3dt4uy42yticfka2kv6a5zlnecjnm6klax5t.py
# Topologically Sorted Source Nodes: [theta], Original ATen: [aten.tanh, aten.tanh_backward]
# Source node to ATen node mapping:
#   theta => tanh
# Graph fragment:
#   %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
#   %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul), kwargs = {})
triton_poi_fused_tanh_tanh_backward_1 = async_compile.triton('triton_poi_fused_tanh_tanh_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_tanh_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_tanh_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 8
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tmp4 = tmp3 * tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp4
    tl.store(in_out_ptr0 + (x2), tmp3, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (8, 4), (4, 1))
    assert_size_stride(primals_3, (8, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
        stream0 = get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = buf1; del buf1  # reuse
        buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [theta], Original ATen: [aten.tanh, aten.tanh_backward]
        triton_poi_fused_tanh_tanh_backward_1.run(buf2, primals_3, buf3, 32, grid=grid(32), stream=stream0)
        del primals_3
    return (reinterpret_tensor(buf2, (4, 4, 2), (8, 2, 1), 0), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), buf3, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp18 = tmp17 + tmp16
    tmp20 = tmp19 + tmp18
    tmp22 = tmp21 + tmp20
    tmp24 = tmp23 + tmp22
    tmp26 = tmp25 + tmp24
    tmp28 = tmp27 + tmp26
    tmp30 = tmp29 + tmp28
    tmp31 = 0.0625
    tmp32 = tmp30 * tmp31
    tl.store(out_ptr0 + x0, tmp32, xmask)


@triton.jit
def triton_poi_fused_tanh_tanh_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 8
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tmp4 = tmp3 * tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp4
    tl.store(in_out_ptr0 + x2, tmp3, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (8, 4), (4, 1))
    assert_size_stride(primals_3, (8,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        triton_poi_fused_tanh_tanh_backward_1[grid(32)](buf2, primals_3, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_3
    return reinterpret_tensor(buf2, (4, 4, 2), (8, 2, 1), 0), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), buf3


class HardAttnNew(nn.Module):
    """Hard Attention (Sec. 3.1.II)"""

    def __init__(self, in_channels):
        super(HardAttnNew, self).__init__()
        self.fc = nn.Linear(in_channels, 4 * 2)
        self.init_params()

    def init_params(self):
        self.fc.weight.data.zero_()
        self.fc.bias.data.copy_(torch.tensor([0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float))

    def forward(self, input_0):
        primals_2 = self.fc.weight
        primals_3 = self.fc.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Danish-VSL/deep-person-reid
HardAttn
false
13,551
[ "MIT" ]
244
2e3a4b6706b84c77203f9905683b917ab0871b93
https://github.com/Danish-VSL/deep-person-reid/tree/2e3a4b6706b84c77203f9905683b917ab0871b93
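Editor's note: a minimal usage sketch for the record above, not part of the dataset row. The module path hard_attn_triton is a hypothetical filename for the optimised code; a CUDA device and Triton are assumed, since the generated call() pins all buffers to cuda:0.

import torch
from hard_attn_triton import HardAttnNew  # hypothetical module holding the optimised code above

model = HardAttnNew(in_channels=4).cuda()   # fc weights must live on the GPU for extern_kernels.mm
x = torch.rand(4, 4, 4, 4, device='cuda')   # the (4, 4, 4, 4) activation the compiled graph expects
theta = model(x)                            # avg_pool -> linear -> tanh, viewed as one (dx, dy) per region
print(theta.shape)                          # torch.Size([4, 4, 2])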
CMMD
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/f6/cf6zadwv7a5akbud7ttdbdc565ql6iiuysbozq7xwvkys7sxo7oz.py # Topologically Sorted Source Nodes: [feat_v_1], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # feat_v_1 => pow_1, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) triton_per_fused_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/o6/co65xeotc4w3dat3ahcsyaw4klhv5mxlia7b5j6qfhr2a27ig27r.py # Topologically Sorted Source Nodes: [mul, sum_1, mul_1, sum_2, mul_2, sum_3], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # sum_1 => sum_3 # sum_2 => sum_4 # sum_3 => sum_5 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_2, %unsqueeze_3), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_4, %unsqueeze_5), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {}) triton_per_fused_mul_sum_1 = async_compile.triton('triton_per_fused_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = (xindex // 4) x3 = xindex 
tmp0 = tl.load(in_ptr0 + (r2 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (r2 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = libdevice.sqrt(tmp1) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tmp8 = libdevice.sqrt(tmp7) tmp9 = triton_helpers.maximum(tmp8, tmp3) tmp10 = tmp6 / tmp9 tmp11 = tmp5 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp18 = libdevice.sqrt(tmp17) tmp19 = triton_helpers.maximum(tmp18, tmp3) tmp20 = tmp16 / tmp19 tmp23 = libdevice.sqrt(tmp22) tmp24 = triton_helpers.maximum(tmp23, tmp3) tmp25 = tmp21 / tmp24 tmp26 = tmp20 * tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.where(xmask, tmp27, 0) tmp30 = tl.sum(tmp29, 1)[:, None] tmp31 = tmp5 * tmp25 tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK]) tmp34 = tl.where(xmask, tmp32, 0) tmp35 = tl.sum(tmp34, 1)[:, None] tl.store(out_ptr0 + (x3), tmp15, xmask) tl.store(out_ptr1 + (x3), tmp30, xmask) tl.store(out_ptr2 + (x3), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ts/ctsxkk536uyp5fec3ryq3tilzzjb36yephn2kbrcplax6x4ngjxm.py # Topologically Sorted Source Nodes: [res, mean, res_1, mean_1, add, res_2, mean_2, mul_3, sub, add_1, loss], Original ATen: [aten.pow, aten.mean, aten.add, aten.mul, aten.sub, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # loss => div_2 # mean => mean # mean_1 => mean_1 # mean_2 => mean_2 # mul_3 => mul_3 # res => pow_5 # res_1 => pow_6 # res_2 => pow_7 # sub => sub # Graph fragment: # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_5,), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_6,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 2), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_7,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 0), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, 1), kwargs = {}) triton_per_fused_add_div_mean_mul_pow_sub_2 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp10 = tl.load(in_ptr2 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp15 = 16.0 tmp16 = tmp4 / tmp15 tmp17 = tmp9 / tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp14 / tmp15 tmp20 = 2.0 tmp21 = tmp19 * tmp20 tmp22 = tmp18 - tmp21 tmp23 = 0.0 tmp24 = tmp22 + tmp23 tmp25 = 1.0 tmp26 = tmp24 * tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [feat_v_1], Original ATen: [aten.linalg_vector_norm] stream0 = get_raw_stream(0) triton_per_fused_linalg_vector_norm_0.run(arg0_1, buf0, 4, 64, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [feat_t_1], Original ATen: [aten.linalg_vector_norm] triton_per_fused_linalg_vector_norm_0.run(arg1_1, buf1, 4, 64, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[mul, sum_1, mul_1, sum_2, mul_2, sum_3], Original ATen: [aten.mul, aten.sum] triton_per_fused_mul_sum_1.run(arg0_1, buf0, arg1_1, buf1, buf2, buf4, buf6, 16, 64, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 del buf0 del buf1 buf3 = empty_strided_cuda((), (), torch.float32) buf8 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [res, mean, res_1, mean_1, add, res_2, mean_2, mul_3, sub, add_1, loss], Original ATen: [aten.pow, aten.mean, aten.add, aten.mul, aten.sub, aten.div] triton_per_fused_add_div_mean_mul_pow_sub_2.run(buf8, buf2, buf4, buf6, 1, 16, grid=grid(1), stream=stream0) del buf2 del buf4 del buf6 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp5, xmask)


@triton.jit
def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 4
    x1 = xindex // 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0), xmask, eviction_policy='evict_last', other=0.0)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + (r2 + 64 * x0), xmask, eviction_policy='evict_last', other=0.0)
    tmp17 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr2 + (r2 + 64 * x1), xmask, eviction_policy='evict_last', other=0.0)
    tmp22 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp2 = libdevice.sqrt(tmp1)
    tmp3 = 1e-12
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = tmp0 / tmp4
    tmp8 = libdevice.sqrt(tmp7)
    tmp9 = triton_helpers.maximum(tmp8, tmp3)
    tmp10 = tmp6 / tmp9
    tmp11 = tmp5 * tmp10
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.where(xmask, tmp12, 0)
    tmp15 = tl.sum(tmp14, 1)[:, None]
    tmp18 = libdevice.sqrt(tmp17)
    tmp19 = triton_helpers.maximum(tmp18, tmp3)
    tmp20 = tmp16 / tmp19
    tmp23 = libdevice.sqrt(tmp22)
    tmp24 = triton_helpers.maximum(tmp23, tmp3)
    tmp25 = tmp21 / tmp24
    tmp26 = tmp20 * tmp25
    tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
    tmp29 = tl.where(xmask, tmp27, 0)
    tmp30 = tl.sum(tmp29, 1)[:, None]
    tmp31 = tmp5 * tmp25
    tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
    tmp34 = tl.where(xmask, tmp32, 0)
    tmp35 = tl.sum(tmp34, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp15, xmask)
    tl.store(out_ptr1 + x3, tmp30, xmask)
    tl.store(out_ptr2 + x3, tmp35, xmask)


@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp5 = tl.load(in_ptr1 + r0, None)
    tmp10 = tl.load(in_ptr2 + r0, None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tmp6 = tmp5 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp11 = tmp10 * tmp10
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.sum(tmp12, 1)[:, None]
    tmp15 = 16.0
    tmp16 = tmp4 / tmp15
    tmp17 = tmp9 / tmp15
    tmp18 = tmp16 + tmp17
    tmp19 = tmp14 / tmp15
    tmp20 = 2.0
    tmp21 = tmp19 * tmp20
    tmp22 = tmp18 - tmp21
    tmp23 = 0.0
    tmp24 = tmp22 + tmp23
    tmp25 = 1.0
    tmp26 = tmp24 * tmp25
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        get_raw_stream(0)
        triton_per_fused_linalg_vector_norm_0[grid(4)](arg0_1, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_per_fused_linalg_vector_norm_0[grid(4)](arg1_1, buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_per_fused_mul_sum_1[grid(16)](arg0_1, buf0, arg1_1, buf1, buf2, buf4, buf6, 16, 64, XBLOCK=8, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del buf0
        del buf1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf8 = buf3
        del buf3
        triton_per_fused_add_div_mean_mul_pow_sub_2[grid(1)](buf8, buf2, buf4, buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf2
        del buf4
        del buf6
    return buf8,


class CMMDNew(nn.Module):

    def __init__(self, num_pos):
        super(CMMDNew, self).__init__()
        self.num_pos = num_pos

    def mmd_loss(self, f_v, f_t):
        return self.poly_kernel(f_v, f_v).mean() + self.poly_kernel(f_t, f_t).mean() - 2 * self.poly_kernel(f_v, f_t).mean()

    def poly_kernel(self, a, b):
        a = a.unsqueeze(0)
        b = b.unsqueeze(1)
        res = (a * b).sum(-1).pow(2)
        return res

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
JDAI-CV/CM-NAS
CMMD
false
8,302
[ "Apache-2.0" ]
31
bbc77f427b2c8afb9f3865f5a04e86079d33dd28
https://github.com/JDAI-CV/CM-NAS/tree/bbc77f427b2c8afb9f3865f5a04e86079d33dd28
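Editor's note: a minimal sketch of how this loss might be invoked, not part of the dataset row. The module path cmmd_triton is a hypothetical filename for the optimised code above; CUDA tensors are assumed because the generated call() pins its buffers to cuda:0.

import torch
from cmmd_triton import CMMDNew  # hypothetical module holding the optimised code above

criterion = CMMDNew(num_pos=4)              # num_pos is stored but never read by forward
feat_v = torch.rand(4, 4, 4, 4, device='cuda')  # features from one modality
feat_t = torch.rand(4, 4, 4, 4, device='cuda')  # features from the other modality
loss = criterion(feat_v, feat_t)            # scalar polynomial-kernel MMD between the two feature sets
print(loss.item())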
GAT
import torch
import torch.nn as nn
import torch.nn.functional as F


class GAT(nn.Module):

    def __init__(self, num_feats):
        super(GAT, self).__init__()
        self.num_feats = num_feats
        self.weight_key = nn.Parameter(torch.zeros(size=(self.num_feats, 1)))
        self.weight_query = nn.Parameter(torch.zeros(size=(self.num_feats, 1)))
        nn.init.xavier_uniform_(self.weight_key, gain=1.414)
        nn.init.xavier_uniform_(self.weight_query, gain=1.414)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        """
        :param x: dim: bz x num_node x num_feat
        :return: dim: bz x num_feat x num_node (attention-weighted features after the final permute)
        """
        batch_size = x.size(0)
        num_nodes = x.size(1)
        key = torch.matmul(x, self.weight_key)
        query = torch.matmul(x, self.weight_query)
        attn_input = key.repeat(1, 1, num_nodes).view(batch_size, num_nodes * num_nodes, 1) + query.repeat(1, num_nodes, 1)
        attn_output = attn_input.squeeze(2).view(batch_size, num_nodes, num_nodes)
        attn_output = F.leaky_relu(attn_output, negative_slope=0.2)
        attention = F.softmax(attn_output, dim=2)
        attention = self.dropout(attention)
        attn_feat = torch.matmul(attention, x).permute(0, 2, 1)
        return attn_feat


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'num_feats': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x2 // 4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (4 * x1 + x0 % 4), xmask)
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)


@triton.jit
def triton_poi_fused__softmax_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp6 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp7 = tmp6 > tmp1
    tmp8 = tmp6 * tmp3
    tmp9 = tl.where(tmp7, tmp6, tmp8)
    tmp11 = tmp10 > tmp1
    tmp12 = tmp10 * tmp3
    tmp13 = tl.where(tmp11, tmp10, tmp12)
    tmp14 = triton_helpers.maximum(tmp9, tmp13)
    tmp16 = tmp15 > tmp1
    tmp17 = tmp15 * tmp3
    tmp18 = tl.where(tmp16, tmp15, tmp17)
    tmp19 = triton_helpers.maximum(tmp14, tmp18)
    tmp21 = tmp20 > tmp1
    tmp22 = tmp20 * tmp3
    tmp23 = tl.where(tmp21, tmp20, tmp22)
    tmp24 = triton_helpers.maximum(tmp19, tmp23)
    tmp25 = tmp5 - tmp24
    tmp26 = tl_math.exp(tmp25)
    tl.store(out_ptr0 + x2, tmp26, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 1), (1, 1))
    assert_size_stride(primals_3, (4, 1), (1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_repeat_0[grid(64)](buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf0
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_leaky_relu_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = buf3
        del buf3
        extern_kernels.bmm(buf4, primals_1, out=buf5)
        del buf4
    return reinterpret_tensor(buf5, (4, 4, 4), (16, 1, 4), 0), primals_1, reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)


class GATNew(nn.Module):

    def __init__(self, num_feats):
        super(GATNew, self).__init__()
        self.num_feats = num_feats
        self.weight_key = nn.Parameter(torch.zeros(size=(self.num_feats, 1)))
        self.weight_query = nn.Parameter(torch.zeros(size=(self.num_feats, 1)))
        nn.init.xavier_uniform_(self.weight_key, gain=1.414)
        nn.init.xavier_uniform_(self.weight_query, gain=1.414)
        self.dropout = nn.Dropout(0.5)

    def forward(self, input_0):
        primals_2 = self.weight_key
        primals_3 = self.weight_query
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
juaduan/babybrainguardian
GAT
false
6,992
[ "MIT" ]
1
b871e3a83fef98c2e05dd8857721a3c964a46418
https://github.com/juaduan/babybrainguardian/tree/b871e3a83fef98c2e05dd8857721a3c964a46418
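Editor's note: a minimal usage sketch for the record above, not part of the dataset row. The module path gat_triton is a hypothetical filename for the optimised code, and a CUDA device is assumed. Note that the compiled call() contains no dropout kernel, so the sketch corresponds to eval-mode behaviour of the original module.

import torch
from gat_triton import GATNew  # hypothetical module holding the optimised code above

gat = GATNew(num_feats=4).cuda()            # weight_key / weight_query must be CUDA tensors
x = torch.rand(4, 4, 4, device='cuda')      # batch x num_node x num_feat, as in get_inputs()
out = gat(x)                                # attention-weighted features, permuted to batch x num_feat x num_node
print(out.shape)                            # torch.Size([4, 4, 4])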
SamePadConv2d
import torch
from torch.nn import functional as F
import torch.nn as nn


class SamePadConv2d(nn.Conv2d):
    """
    Conv with TF padding='same'
    https://github.com/pytorch/pytorch/issues/3867#issuecomment-349279036
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True, padding_mode='zeros'):
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias, padding_mode)

    def get_pad_odd(self, in_, weight, stride, dilation):
        effective_filter_size_rows = (weight - 1) * dilation + 1
        out_rows = (in_ + stride - 1) // stride
        padding_rows = max(0, (out_rows - 1) * stride + effective_filter_size_rows - in_)
        rows_odd = padding_rows % 2 != 0
        return padding_rows, rows_odd

    def forward(self, x):
        padding_rows, rows_odd = self.get_pad_odd(x.shape[2], self.weight.shape[2], self.stride[0], self.dilation[0])
        padding_cols, cols_odd = self.get_pad_odd(x.shape[3], self.weight.shape[3], self.stride[1], self.dilation[1])
        if rows_odd or cols_odd:
            # TF 'same' puts the extra row/column on the bottom/right when total padding is odd
            x = F.pad(x, [0, int(cols_odd), 0, int(rows_odd)])
        return F.conv2d(x, self.weight, self.bias, self.stride, padding=(padding_rows // 2, padding_cols // 2), dilation=self.dilation, groups=self.groups)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 5 % 5
    x0 = xindex % 5
    x2 = xindex // 25
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 4, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = x0
    tmp4 = tmp3 < tmp1
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0)
    tl.store(out_ptr0 + x3, tmp6, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(400)](primals_1, buf0, 400, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


class SamePadConv2dNew(nn.Conv2d):
    """
    Conv with TF padding='same'
    https://github.com/pytorch/pytorch/issues/3867#issuecomment-349279036
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True, padding_mode='zeros'):
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias, padding_mode)

    def get_pad_odd(self, in_, weight, stride, dilation):
        effective_filter_size_rows = (weight - 1) * dilation + 1
        out_rows = (in_ + stride - 1) // stride
        padding_rows = max(0, (out_rows - 1) * stride + effective_filter_size_rows - in_)
        rows_odd = padding_rows % 2 != 0
        return padding_rows, rows_odd

    def forward(self, input_0):
        primals_1 = self.weight
        primals_3 = self.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
jjeamin/obJDetection
SamePadConv2d
false
6,951
[ "MIT" ]
1
eb7fbc410beb00fad1a6477e827e9ce2d8efbac5
https://github.com/jjeamin/obJDetection/tree/eb7fbc410beb00fad1a6477e827e9ce2d8efbac5
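Editor's note: a minimal usage sketch for the record above, not part of the dataset row. The module path same_pad_conv_triton is a hypothetical filename for the optimised code; a CUDA device is assumed, as the generated call() allocates cuda:0 buffers.

import torch
from same_pad_conv_triton import SamePadConv2dNew  # hypothetical module holding the optimised code above

conv = SamePadConv2dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = conv(x)         # total padding is 3 (odd), so one extra row/column is padded before the conv
print(y.shape)      # torch.Size([4, 4, 4, 4]): spatial size preserved, as with TF padding='same'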
F_conv
import torch
import warnings
import torch.nn as nn
import torch.nn.functional as F


class F_conv(nn.Module):
    """ResNet transformation, not itself reversible, just used below"""

    def __init__(self, in_channels, channels, channels_hidden=None, stride=None, kernel_size=3, leaky_slope=0.1, batch_norm=False):
        super(F_conv, self).__init__()
        if stride:
            warnings.warn("Stride doesn't do anything, the argument should be removed", DeprecationWarning)
        if not channels_hidden:
            channels_hidden = channels
        pad = kernel_size // 2
        self.leaky_slope = leaky_slope
        self.conv1 = nn.Conv2d(in_channels, channels_hidden, kernel_size=kernel_size, padding=pad, bias=not batch_norm)
        self.conv2 = nn.Conv2d(channels_hidden, channels_hidden, kernel_size=kernel_size, padding=pad, bias=not batch_norm)
        self.conv3 = nn.Conv2d(channels_hidden, channels, kernel_size=kernel_size, padding=pad, bias=not batch_norm)
        if batch_norm:
            self.bn1 = nn.BatchNorm2d(channels_hidden)
            self.bn1.weight.data.fill_(1)
            self.bn2 = nn.BatchNorm2d(channels_hidden)
            self.bn2.weight.data.fill_(1)
            self.bn3 = nn.BatchNorm2d(channels)
            self.bn3.weight.data.fill_(1)
        self.batch_norm = batch_norm

    def forward(self, x):
        out = self.conv1(x)
        if self.batch_norm:
            out = self.bn1(out)
        out = F.leaky_relu(out, self.leaky_slope)
        out = self.conv2(out)
        if self.batch_norm:
            out = self.bn2(out)
        out = F.leaky_relu(out, self.leaky_slope)
        out = self.conv3(out)
        if self.batch_norm:
            out = self.bn3(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import warnings
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf5 = buf0
        del buf0
        triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf3, primals_5, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf3
        del primals_5
        buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_1[grid(256)](buf7, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
    return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf4, buf5)


class F_convNew(nn.Module):
    """ResNet transformation, not itself reversible, just used below"""

    def __init__(self, in_channels, channels, channels_hidden=None, stride=None, kernel_size=3, leaky_slope=0.1, batch_norm=False):
        super(F_convNew, self).__init__()
        if stride:
            warnings.warn("Stride doesn't do anything, the argument should be removed", DeprecationWarning)
        if not channels_hidden:
            channels_hidden = channels
        pad = kernel_size // 2
        self.leaky_slope = leaky_slope
        self.conv1 = nn.Conv2d(in_channels, channels_hidden, kernel_size=kernel_size, padding=pad, bias=not batch_norm)
        self.conv2 = nn.Conv2d(channels_hidden, channels_hidden, kernel_size=kernel_size, padding=pad, bias=not batch_norm)
        self.conv3 = nn.Conv2d(channels_hidden, channels, kernel_size=kernel_size, padding=pad, bias=not batch_norm)
        if batch_norm:
            self.bn1 = nn.BatchNorm2d(channels_hidden)
            self.bn1.weight.data.fill_(1)
            self.bn2 = nn.BatchNorm2d(channels_hidden)
            self.bn2.weight.data.fill_(1)
            self.bn3 = nn.BatchNorm2d(channels)
            self.bn3.weight.data.fill_(1)
        self.batch_norm = batch_norm

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0]
ramonpeter/LaSeR
F_conv
false
7,531
[ "MIT" ]
1
28daa6876256501ed0d3e84a4ddfedc7892bd528
https://github.com/ramonpeter/LaSeR/tree/28daa6876256501ed0d3e84a4ddfedc7892bd528
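Editor's note: a minimal usage sketch for the record above, not part of the dataset row. The module path f_conv_triton is a hypothetical filename for the optimised code; a CUDA device is assumed, since the generated call() pins its buffers to cuda:0.

import torch
from f_conv_triton import F_convNew  # hypothetical module holding the optimised code above

block = F_convNew(in_channels=4, channels=4).cuda()  # hidden width defaults to `channels`
x = torch.rand(4, 4, 4, 4, device='cuda')
out = block(x)      # conv -> leaky_relu(0.1) -> conv -> leaky_relu(0.1) -> conv; padding keeps H x W
print(out.shape)    # torch.Size([4, 4, 4, 4])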