Dataset schema (column : type : observed range):

    entry_point                   string   length 1 to 65
    original_triton_python_code   string   length 208 to 619k
    optimised_triton_code         string   length 1.15k to 275k
    repo_name                     string   length 7 to 115
    module_name                   string   length 1 to 65
    synthetic                     bool     1 class
    uuid                          int64    0 to 18.5k
    licenses                      list     length 1 to 6
    stars                         int64    0 to 19.8k
    sha                           string   length 40 to 40
    repo_link                     string   length 72 to 180
Dueling_Critic
import torch
import torch.nn.functional as F
import torch.nn as nn


class Dueling_Critic(nn.Module):

    def __init__(self, input_size, output_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, 1)
        self.linear3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x1 = F.relu(self.linear1(x))
        x2 = F.relu(self.linear1(x))
        y1 = self.linear2(x1)
        y2 = self.linear3(x2)
        x3 = y1 + y2 - y2.mean(dim=1, keepdim=True)
        return x3


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4, 'hidden_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex // 4
    x5 = xindex
    x3 = xindex // 64
    x6 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + x5, xmask)
    tmp6 = tl.load(in_ptr2 + (x6 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + (16 + x6 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (32 + x6 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + (48 + x6 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp3 = tmp0 + tmp2
    tmp5 = tmp3 + tmp4
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp12 = tmp10 + tmp11
    tmp13 = 4.0
    tmp14 = tmp12 / tmp13
    tmp15 = tmp5 - tmp14
    tl.store(out_ptr0 + x5, tmp15, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 4), (4, 1))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2)
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mean_sub_1[grid(256)](buf2, primals_5, buf3,
            buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf2
        del buf3
        del primals_5
    return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0
        ), primals_6, primals_4, buf5


class Dueling_CriticNew(nn.Module):

    def __init__(self, input_size, output_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, 1)
        self.linear3 = nn.Linear(hidden_size, output_size)

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_6 = self.linear3.weight
        primals_7 = self.linear3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
FlickerNiko/ai_lib
Dueling_Critic
false
13,693
[ "MIT" ]
99
7087d4569c9a827d35dd8735b55a080834d31a82
https://github.com/FlickerNiko/ai_lib/tree/7087d4569c9a827d35dd8735b55a080834d31a82
EncoderImagePrecomp
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/5x/c5xuny2q3w4rfyqbc6h5bf6h7phwdd6eg4xkubmoahyiibq64bkx.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div]
# Source node to ATen node mapping:
#   X => div
#   norm => add
#   pow_1 => pow_1
#   sqrt => sqrt
#   sum_1 => sum_1
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
#   %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %add), kwargs = {})
triton_poi_fused_add_div_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_pow_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-08
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_div_pow_sqrt_sum_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
    return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-08
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_pow_sqrt_sum_0[grid(256)](buf0, buf1, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0


def l2norm(X, dim, eps=1e-08):
    """L2-normalize columns of X
    """
    norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
    X = torch.div(X, norm)
    return X


class EncoderImagePrecompNew(nn.Module):

    def __init__(self, img_dim, embed_size, no_imgnorm=False):
        super(EncoderImagePrecompNew, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.fc = nn.Linear(img_dim, embed_size)
        self.init_weights()

    def init_weights(self):
        """Xavier initialization for the fully connected layer
        """
        r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)

    def load_state_dict(self, state_dict):
        """Copies parameters. overwritting the default one to
        accept state_dict from Full model
        """
        own_state = self.state_dict()
        new_state = OrderedDict()
        for name, param in state_dict.items():
            if name in own_state:
                new_state[name] = param
        super(EncoderImagePrecompNew, self).load_state_dict(new_state)

    def forward(self, input_0):
        primals_1 = self.fc.weight
        primals_2 = self.fc.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
ChopinSharp/SCAN
EncoderImagePrecomp
false
4,994
[ "Apache-2.0" ]
1
4a165b2aeb3007685054d0c550540893b2006b17
https://github.com/ChopinSharp/SCAN/tree/4a165b2aeb3007685054d0c550540893b2006b17
ConcatConv1d
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/nd/cndqmv6opasogda3abybssrolojxtgspiw4l44wajkbuoorfwxgs.py
# Topologically Sorted Source Nodes: [ttx], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   ttx => cat
# Graph fragment:
#   %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4) % 5
    x0 = xindex % 4
    x2 = (xindex // 20)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 5, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + (x0 + (4*((-1) + x1)) + (16*x2)), tmp6 & xmask, other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/gi/cgiwilqietbdjis33kn5fz2pnnroyzpbn4tycl3qtynjtmghkqyd.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   conv1d => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 2) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
    assert_size_stride(primals_3, (4, 5, 3), (15, 3, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 5, 4), (20, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [ttx], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 80, grid=grid(80), stream=stream0)
        del primals_1
        del primals_2
        # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 2), (8, 2, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf2, primals_4, 32, grid=grid(32), stream=stream0)
        del primals_4
    return (buf2, primals_3, buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 5, 3), (15, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 5
    x0 = xindex % 4
    x2 = xindex // 20
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 5, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-1 + x1) + 16 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 2 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
    assert_size_stride(primals_3, (4, 5, 3), (15, 3, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 5, 4), (20, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(80)](primals_2, primals_1, buf0, 80,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 2), (8, 2, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(32)](buf2, primals_4, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_4
    return buf2, primals_3, buf0


class ConcatConv1dNew(nn.Module):
    """
    1d convolution concatenated with time for usage in ODENet.
    """

    def __init__(self, dim_in, dim_out, kernel_size=3, stride=1, padding=0,
            bias=True, transpose=False):
        super(ConcatConv1dNew, self).__init__()
        module = nn.ConvTranspose1d if transpose else nn.Conv1d
        self._layer = module(dim_in + 1, dim_out, kernel_size=kernel_size,
            stride=stride, padding=padding, bias=bias)

    def forward(self, input_0, input_1):
        primals_3 = self._layer.weight
        primals_4 = self._layer.bias
        primals_2 = input_0
        primals_1 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
puneat/SS-using-NODE
ConcatConv1d
false
4,147
[ "MIT" ]
0
29f053769420a2d1cab1ad45f59a912c2ac737da
https://github.com/puneat/SS-using-NODE/tree/29f053769420a2d1cab1ad45f59a912c2ac737da
SimpleCumSumModule
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/3c/c3cpu3wz3bo3cso4qi4ii34iyrr3niuqsvkxdjr6hbwpvju6c4oe.py
# Topologically Sorted Source Nodes: [cumsum], Original ATen: [aten.cumsum]
# Source node to ATen node mapping:
#   cumsum => cumsum
# Graph fragment:
#   %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%arg0_1, 4), kwargs = {})
triton_per_fused_cumsum_0 = async_compile.triton('triton_per_fused_cumsum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
    tmp0 = arg0_0 + arg1_0
    return tmp0

@triton_heuristics.persistent_reduction(
    size_hints=[256, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
    tmp1 = tmp0.to(tl.float32)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0)
    tl.store(out_ptr0 + (r1 + (4*x0)), tmp3, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cumsum], Original ATen: [aten.cumsum]
        stream0 = get_raw_stream(0)
        triton_per_fused_cumsum_0.run(arg0_1, buf0, 256, 4, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
    tmp0 = arg0_0 + arg1_0
    return tmp0


@triton.jit
def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
    tmp1 = tmp0.to(tl.float32)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0)
    tl.store(out_ptr0 + (r1 + 4 * x0), tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_per_fused_cumsum_0[grid(256)](arg0_1, buf0, 256, 4, XBLOCK=
            32, num_warps=2, num_stages=1)
        del arg0_1
    return buf0,


class SimpleCumSumModuleNew(torch.nn.Module):

    def __init__(self, dim):
        super(SimpleCumSumModuleNew, self).__init__()
        self.dim = dim

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleCumSumModule
false
12,570
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
ResNetBlockGroupNorm
import torch
import torch.nn as nn


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=1, bias=False)


class ResNetBlockGroupNorm(nn.Module):

    def __init__(self, inplanes, planes, num_groups, stride=1, activation='relu'):
        super(ResNetBlockGroupNorm, self).__init__()
        assert activation in ['relu', 'elu', 'leaky_relu']
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.gn1 = nn.GroupNorm(num_groups, planes)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'elu':
            self.activation = nn.ELU(inplace=True)
        else:
            self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
        self.conv2 = conv3x3(planes, planes)
        self.gn2 = nn.GroupNorm(num_groups, planes)
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(nn.Conv2d(inplanes, planes,
                kernel_size=1, stride=stride, bias=False), nn.GroupNorm(
                num_groups, planes))
        self.downsample = downsample
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.constant_(self.gn1.weight, 1.0)
        nn.init.constant_(self.gn1.bias, 0.0)
        nn.init.constant_(self.gn2.weight, 1.0)
        nn.init.constant_(self.gn2.bias, 0.0)
        if self.downsample is not None:
            assert isinstance(self.downsample[1], nn.GroupNorm)
            nn.init.constant_(self.downsample[1].weight, 1.0)
            nn.init.constant_(self.downsample[1].bias, 0.0)

    def init(self, x, init_scale=1.0):
        with torch.no_grad():
            return self(x)

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.gn1(out)
        out = self.activation(out)
        out = self.conv2(out)
        out = self.gn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.activation(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplanes': 4, 'planes': 4, 'num_groups': 1}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
        out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    r3 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = tmp0 - tmp10
    tmp18 = 64.0
    tmp19 = tmp16 / tmp18
    tmp20 = 1e-05
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.rsqrt(tmp21)
    tmp23 = tmp17 * tmp22
    tmp25 = tmp23 * tmp24
    tmp27 = tmp25 + tmp26
    tmp28 = tl.full([1, 1], 0, tl.int32)
    tmp29 = triton_helpers.maximum(tmp28, tmp27)
    tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask)
    tl.store(out_ptr3 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)


@triton.jit
def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0,
        in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4,
        xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    r3 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = tmp0 - tmp10
    tmp18 = 64.0
    tmp19 = tmp16 / tmp18
    tmp20 = 1e-05
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.rsqrt(tmp21)
    tmp23 = tmp17 * tmp22
    tmp25 = tmp23 * tmp24
    tmp27 = tmp25 + tmp26
    tmp29 = tmp27 + tmp28
    tmp30 = tl.full([1, 1], 0, tl.int32)
    tmp31 = triton_helpers.maximum(tmp30, tmp29)
    tmp32 = 0.0
    tmp33 = tmp31 <= tmp32
    tl.store(out_ptr2 + (r1 + 64 * x0), tmp31, xmask)
    tl.store(out_ptr3 + (r1 + 64 * x0), tmp33, xmask)
    tl.store(out_ptr4 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        get_raw_stream(0)
        triton_per_fused_native_group_norm_relu_0[grid(4)](buf0, primals_3,
            primals_4, buf1, buf5, buf4, 4, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del primals_4
        buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
        buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        triton_per_fused_add_native_group_norm_relu_threshold_backward_1[grid
            (4)](buf6, primals_6, primals_7, primals_1, buf7, buf11, buf12,
            buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del primals_7
    return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6,
        buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0),
        reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6,
        reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor(
        buf10, (4, 1), (1, 1), 0), buf12)


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=1, bias=False)


class ResNetBlockGroupNormNew(nn.Module):

    def __init__(self, inplanes, planes, num_groups, stride=1, activation='relu'):
        super(ResNetBlockGroupNormNew, self).__init__()
        assert activation in ['relu', 'elu', 'leaky_relu']
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.gn1 = nn.GroupNorm(num_groups, planes)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'elu':
            self.activation = nn.ELU(inplace=True)
        else:
            self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
        self.conv2 = conv3x3(planes, planes)
        self.gn2 = nn.GroupNorm(num_groups, planes)
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(nn.Conv2d(inplanes, planes,
                kernel_size=1, stride=stride, bias=False), nn.GroupNorm(
                num_groups, planes))
        self.downsample = downsample
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.constant_(self.gn1.weight, 1.0)
        nn.init.constant_(self.gn1.bias, 0.0)
        nn.init.constant_(self.gn2.weight, 1.0)
        nn.init.constant_(self.gn2.bias, 0.0)
        if self.downsample is not None:
            assert isinstance(self.downsample[1], nn.GroupNorm)
            nn.init.constant_(self.downsample[1].weight, 1.0)
            nn.init.constant_(self.downsample[1].bias, 0.0)

    def init(self, x, init_scale=1.0):
        with torch.no_grad():
            return self(x)

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.gn1.weight
        primals_4 = self.gn1.bias
        primals_5 = self.conv2.weight
        primals_6 = self.gn2.weight
        primals_7 = self.gn2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
andrecianflone/wolf
ResNetBlockGroupNorm
false
14,857
[ "Apache-2.0" ]
75
826bbedc58d4d29871110349356868066a3108e6
https://github.com/andrecianflone/wolf/tree/826bbedc58d4d29871110349356868066a3108e6
InnerProductLayer
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_9/inductor_cache/dx/cdxuxa4usulotytq2uhe2lpzvskzdaihvkxtup7hxtpolwz634sr.py
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   p => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select, %select_1, %select_2, %select_3, %select_4, %select_5], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4) % 24
    x0 = xindex % 4
    x2 = (xindex // 96)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp9 & xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (x0 + (4*((-8) + x1)) + (16*x2)), tmp14 & xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tmp17 = tl.full([1], 16, tl.int64)
    tmp18 = tmp0 < tmp17
    tmp19 = tmp16 & tmp18
    tmp20 = tl.load(in_ptr0 + (64 + x0 + (4*((-12) + x1)) + (16*x2)), tmp19 & xmask, other=0.0)
    tmp21 = tmp0 >= tmp17
    tmp22 = tl.full([1], 20, tl.int64)
    tmp23 = tmp0 < tmp22
    tmp24 = tmp21 & tmp23
    tmp25 = tl.load(in_ptr0 + (64 + x0 + (4*((-16) + x1)) + (16*x2)), tmp24 & xmask, other=0.0)
    tmp26 = tmp0 >= tmp22
    tmp27 = tl.full([1], 24, tl.int64)
    tmp28 = tmp0 < tmp27
    tmp29 = tl.load(in_ptr0 + (128 + x0 + (4*((-20) + x1)) + (16*x2)), tmp26 & xmask, other=0.0)
    tmp30 = tl.where(tmp24, tmp25, tmp29)
    tmp31 = tl.where(tmp19, tmp20, tmp30)
    tmp32 = tl.where(tmp14, tmp15, tmp31)
    tmp33 = tl.where(tmp9, tmp10, tmp32)
    tmp34 = tl.where(tmp4, tmp5, tmp33)
    tl.store(out_ptr0 + (x3), tmp34, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_9/inductor_cache/3d/c3d5ce25tfocluzh3yh76j6qjeohfmdemkusyhh2wn2xdyzoda7p.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   q => cat_1
# Graph fragment:
#   %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_6, %select_7, %select_8, %select_9, %select_10, %select_11], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4) % 24
    x0 = xindex % 4
    x2 = (xindex // 96)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (64 + x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr0 + (128 + x0 + (4*((-4) + x1)) + (16*x2)), tmp9 & xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (192 + x0 + (4*((-8) + x1)) + (16*x2)), tmp14 & xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tmp17 = tl.full([1], 16, tl.int64)
    tmp18 = tmp0 < tmp17
    tmp19 = tmp16 & tmp18
    tmp20 = tl.load(in_ptr0 + (128 + x0 + (4*((-12) + x1)) + (16*x2)), tmp19 & xmask, other=0.0)
    tmp21 = tmp0 >= tmp17
    tmp22 = tl.full([1], 20, tl.int64)
    tmp23 = tmp0 < tmp22
    tmp24 = tmp21 & tmp23
    tmp25 = tl.load(in_ptr0 + (192 + x0 + (4*((-16) + x1)) + (16*x2)), tmp24 & xmask, other=0.0)
    tmp26 = tmp0 >= tmp22
    tmp27 = tl.full([1], 24, tl.int64)
    tmp28 = tmp0 < tmp27
    tmp29 = tl.load(in_ptr0 + (192 + x0 + (4*((-20) + x1)) + (16*x2)), tmp26 & xmask, other=0.0)
    tmp30 = tl.where(tmp24, tmp25, tmp29)
    tmp31 = tl.where(tmp19, tmp20, tmp30)
    tmp32 = tl.where(tmp14, tmp15, tmp31)
    tmp33 = tl.where(tmp9, tmp10, tmp32)
    tmp34 = tl.where(tmp4, tmp5, tmp33)
    tl.store(out_ptr0 + (x3), tmp34, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_9/inductor_cache/wm/cwmiv6irrhvdznioij35ss6z35p7cnponuwhiju4tyoert736dwz.py
# Topologically Sorted Source Nodes: [inner_product, inner_product_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
#   inner_product => mul
#   inner_product_1 => sum_1
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, %cat_1), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2], True), kwargs = {})
triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 96
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [p], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(arg0_1, buf0, 384, grid=grid(384), stream=stream0)
        buf1 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [q], Original ATen: [aten.cat]
        triton_poi_fused_cat_1.run(arg0_1, buf1, 384, grid=grid(384), stream=stream0)
        del arg0_1
        buf2 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [inner_product, inner_product_1], Original ATen: [aten.mul, aten.sum]
        triton_poi_fused_mul_sum_2.run(buf0, buf1, buf2, 96, grid=grid(96), stream=stream0)
        del buf0
        del buf1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 24 x0 = xindex % 4 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (64 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (64 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tl.full([1], 24, tl.int64) tmp29 = tl.load(in_ptr0 + (128 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 & xmask, other=0.0) tmp30 = tl.where(tmp24, tmp25, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + x3, tmp34, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 24 x0 = xindex % 4 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 + x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (128 + x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (192 + x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (128 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (192 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tl.full([1], 24, tl.int64) tmp29 = tl.load(in_ptr0 + (192 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 & xmask, other=0.0) tmp30 = tl.where(tmp24, tmp25, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + x3, tmp34, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: 
tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x0, tmp14, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(384)](arg0_1, buf1, 384, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(96)](buf0, buf1, buf2, 96, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 return buf2, class InnerProductLayerNew(nn.Module): """InnerProduct Layer used in PNN that computes the element-wise product or the inner product between feature vectors. Input shape - a list of 3D tensors with shape: ``(batch_size,1,embedding_size)``. Output shape - 3D tensor with shape: ``(batch_size, N*(N-1)/2, 1)`` if reduce_sum is used, or 3D tensor with shape: ``(batch_size, N*(N-1)/2, embedding_size)`` if reduce_sum is not used. Arguments - **reduce_sum**: bool. Whether to return the inner product or the element-wise product References - [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]// Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.] (https://arxiv.org/pdf/1611.00144.pdf)""" def __init__(self, reduce_sum=True, device='cpu'): super(InnerProductLayerNew, self).__init__() self.reduce_sum = reduce_sum self.to(device) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
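# Hedged usage sketch (an editor's illustration, not from the upstream repo):
# a minimal smoke test for InnerProductLayerNew above. It assumes a CUDA
# device, since call() allocates CUDA buffers, and reuses the (4, 4, 4, 4)
# input convention of the get_inputs() helpers elsewhere in this dump; the
# expected (4, 24, 1) output shape is read off the buf2 allocation in call().
import torch

if torch.cuda.is_available():
    layer = InnerProductLayerNew(reduce_sum=True)
    out = layer(torch.rand([4, 4, 4, 4], device='cuda'))
    assert out.shape == (4, 24, 1)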
liyunrui/DeepCTR-Torch
InnerProductLayer
false
12,951
[ "Apache-2.0" ]
0
392fd6d39d9ca0ac854022136cdb4d5c68e3a592
https://github.com/liyunrui/DeepCTR-Torch/tree/392fd6d39d9ca0ac854022136cdb4d5c68e3a592
LocalNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/we/cwerb6bh4wkw66tuuogvvdhshhz6u3cw5s3a4jqb23ftxyfx5pg2.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/gd/cgdqnufkmolkhmahgpirghlilfdhf2tzbnkmaswr2jaio34ke2tp.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # conv2d => convolution # x => gt # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4h/c4hp7y3t3avb7y4kdpd3qny63wnyjqlzzqqpgqgesca5dwxpwjbq.py # Topologically Sorted Source Nodes: [conv2d, x, pad_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d] # Source node to ATen node mapping: # conv2d => convolution # pad_1 => _unsafe_index_2, _unsafe_index_3 # x => mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = 
{}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x4 = (xindex // 36) x2 = (xindex // 36) % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.01 tmp5 = tmp3 * tmp4 tmp6 = tl.where(tmp0, tmp3, tmp5) tl.store(out_ptr0 + (x5), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tz/ctz2qoenjoyo6tyt4vbajmakep7el2x5bm5bjwz5dxrqesj5uyf2.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, 
%primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, None) tl.store(out_ptr1 + (x3), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_2, (64, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 2304, grid=grid(2304), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1)) buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_3, buf2, 4096, grid=grid(4096), stream=stream0) buf3 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, x, pad_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d] triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2.run(buf2, buf1, primals_3, buf3, 9216, grid=grid(9216), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1)) buf5 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool) buf6 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_3.run(buf4, primals_5, buf5, buf6, 4096, grid=grid(4096), stream=stream0) del buf4 del primals_5 return (buf6, primals_2, primals_4, buf0, buf2, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.01 tmp5 = tmp3 * tmp4 tmp6 = tl.where(tmp0, tmp3, tmp5) tl.store(out_ptr0 + x5, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_2, (64, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32 ) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(2304)](primals_1, buf0, 2304, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1)) buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_1[grid(4096)](buf1, primals_3, buf2, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch. float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2[grid(9216)]( buf2, buf1, primals_3, buf3, 9216, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1)) buf5 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool) buf6 = buf1 del buf1 triton_poi_fused_convolution_leaky_relu_3[grid(4096)](buf4, primals_5, buf5, buf6, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 return buf6, primals_2, primals_4, buf0, buf2, buf3, buf5 class LocalNetNew(nn.Module): def __init__(self, in_channels=16, out_channels=64): """Double convolutional block :param in_channels: number of input channels :param out_channels: number of output channels :returns: N/A :rtype: N/A """ super(LocalNetNew, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, 0, 1) self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 0, 1) self.lrelu = nn.LeakyReLU() self.refpad = nn.ReflectionPad2d(1) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
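# Hedged usage sketch (an editor's illustration, not from the upstream repo):
# a smoke test for LocalNetNew above. The (4, 16, 4, 4) input and
# (4, 64, 4, 4) output shapes are taken from the assert_size_stride checks on
# primals_1 and buf6 in call(); the module must be moved to CUDA first so its
# conv weights live on the same device as the buffers call() allocates.
import torch

if torch.cuda.is_available():
    net = LocalNetNew(in_channels=16, out_channels=64).cuda()
    out = net(torch.rand([4, 16, 4, 4], device='cuda'))
    assert out.shape == (4, 64, 4, 4)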
ZombaSY/DeepLPF
LocalNet
false
1,345
[ "BSD-3-Clause" ]
0
adce64ae01bc9e32f465a354cb1f6534f0d13597
https://github.com/ZombaSY/DeepLPF/tree/adce64ae01bc9e32f465a354cb1f6534f0d13597
DilatedResidualLayer
import torch from torch import nn import torch.nn.functional as F class DilatedResidualLayer(nn.Module): def __init__(self, dilation, in_channels, out_channels): super(DilatedResidualLayer, self).__init__() self.conv_dilated = nn.Conv1d(in_channels, out_channels, 3, padding=dilation, dilation=dilation) self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1) self.dropout = nn.Dropout() def forward(self, x): out = F.relu(self.conv_dilated(x)) out = self.conv_1x1(out) out = self.dropout(out) return x + out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'dilation': 1, 'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, primals_2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4 ), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4), (16, 4, 1)) buf3 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 triton_poi_fused_add_1[grid(16)](buf3, primals_3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0 ), buf4 class DilatedResidualLayerNew(nn.Module): def __init__(self, dilation, in_channels, out_channels): super(DilatedResidualLayerNew, self).__init__() self.conv_dilated = nn.Conv1d(in_channels, out_channels, 3, padding =dilation, dilation=dilation) self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1) self.dropout = nn.Dropout() def forward(self, input_0): primals_1 = self.conv_dilated.weight primals_2 = self.conv_dilated.bias primals_4 = self.conv_1x1.weight primals_5 = self.conv_1x1.bias primals_3 = input_0 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
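# Hedged usage sketch (an editor's illustration, not from the upstream repo):
# a smoke test for DilatedResidualLayerNew above, using the (4, 4) input from
# this row's get_inputs(). Note that call() contains no dropout op, so this
# appears to be an inference-mode trace: its outputs match the eager module
# only with the module in eval() mode.
import torch

if torch.cuda.is_available():
    layer = DilatedResidualLayerNew(dilation=1, in_channels=4, out_channels=4).cuda()
    out = layer(torch.rand([4, 4], device='cuda'))
    assert out.shape == (4, 4)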
Jaakik/hydra-ml
DilatedResidualLayer
false
11,535
[ "MIT" ]
0
eae54fc478163130c94450a2a2ddea4f204c1ea9
https://github.com/Jaakik/hydra-ml/tree/eae54fc478163130c94450a2a2ddea4f204c1ea9
ResidualBlock_noBN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/nd/cndrb2by2g3ednlxsjtytxjhjdm7bdlrgvzzodhsxvxnpml2di7p.py # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # out => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ee/cee5rlyewrt7mgwaib2prtpk3sps6e32amj5myetakd7b423m57w.py # Topologically Sorted Source Nodes: [out_1, add], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # add => add # out_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %convolution_1), kwargs = {}) triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_out_ptr0 + (x3), None) tmp2 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1)) assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: 
[conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 1048576, grid=grid(1048576), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out_1, add], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_1.run(buf3, primals_1, primals_5, 1048576, grid=grid(1048576), stream=stream0) del primals_5 return (buf3, primals_1, primals_2, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_out_ptr0 + x3, None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1)) assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_3, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_add_convolution_1[grid(1048576)](buf3, primals_1, primals_5, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 return buf3, primals_1, primals_2, primals_4, buf1 def initialize_weights(net_l, scale=1): if not isinstance(net_l, list): net_l = [net_l] for net in net_l: for m in net.modules(): if isinstance(m, nn.Conv3d): init.kaiming_normal_(m.weight, a=0, mode='fan_in') m.weight.data *= scale if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Linear): init.kaiming_normal_(m.weight, a=0, mode='fan_in') m.weight.data *= scale if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm3d): init.constant_(m.weight, 1) init.constant_(m.bias.data, 0.0) class ResidualBlock_noBNNew(nn.Module): """Residual block w/o BN ---Conv-ReLU-Conv-+- |________________| """ def __init__(self, nf=64): super(ResidualBlock_noBNNew, self).__init__() self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) initialize_weights([self.conv1, self.conv2], 0.1) def forward(self, input_0): 
primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
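# Hedged usage sketch (an editor's illustration, not from the upstream repo):
# a smoke test for ResidualBlock_noBNNew above; shapes follow the
# assert_size_stride checks in call(). Note that initialize_weights() only
# matches nn.Conv3d / nn.Linear / nn.BatchNorm3d, so the nn.Conv2d layers
# here are silently left at their default initialization.
import torch

if torch.cuda.is_available():
    block = ResidualBlock_noBNNew(nf=64).cuda()
    out = block(torch.rand([4, 64, 64, 64], device='cuda'))
    assert out.shape == (4, 64, 64, 64)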
LCM1999/VolumeRescaling
ResidualBlock_noBN
false
17,575
[ "Apache-2.0" ]
4
3eeabf057e68804ed945711b440f19e419c10d7a
https://github.com/LCM1999/VolumeRescaling/tree/3eeabf057e68804ed945711b440f19e419c10d7a
UpsamplingBilinear
import torch import torch.nn as nn from torch.quantization import QuantStub from torch.quantization import DeQuantStub class UpsamplingBilinear(nn.Module): def __init__(self): super().__init__() self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, x): x = self.quant(x) upsample = nn.functional.interpolate(x, scale_factor=2, mode= 'bilinear', align_corners=True) return self.dequant(upsample) def fuse_model(self): pass def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.quantization import QuantStub from torch.quantization import DeQuantStub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.42857142857142855 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 3, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = x0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp2 tmp14 = triton_helpers.maximum(tmp13, tmp4) tmp15 = tmp14.to(tl.int32) tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tmp15 + tmp7 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tmp19 - tmp16 tmp21 = tmp15.to(tl.float32) tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp4) tmp24 = 1.0 tmp25 = triton_helpers.minimum(tmp23, tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp16 + tmp26 tmp28 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp30 = tmp29 - tmp28 tmp31 = tmp30 * tmp25 tmp32 = tmp28 + tmp31 tmp33 = tmp27 - tmp32 tmp34 = tmp6.to(tl.float32) tmp35 = tmp5 - tmp34 tmp36 = triton_helpers.maximum(tmp35, tmp4) tmp37 = triton_helpers.minimum(tmp36, tmp24) tmp38 = tmp33 * tmp37 tmp39 = tmp32 + tmp38 tl.store(in_out_ptr0 + x4, tmp39, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (1024)](buf1, arg0_1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf1, class UpsamplingBilinearNew(nn.Module): def __init__(self): super().__init__() self.quant = QuantStub() self.dequant = DeQuantStub() def fuse_model(self): pass def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
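# Hedged usage sketch (an editor's illustration, not from the upstream repo):
# a smoke test for UpsamplingBilinearNew above. scale_factor=2 with
# align_corners=True doubles the spatial dims, matching the (4, 4, 8, 8)
# buffer allocated in call(); the constant 0.42857142857142855 in the kernel
# is (4 - 1) / (8 - 1), the align_corners=True source-index scale.
import torch

if torch.cuda.is_available():
    up = UpsamplingBilinearNew()
    out = up(torch.rand([4, 4, 4, 4], device='cuda'))
    assert out.shape == (4, 4, 8, 8)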
Leslie-Fang/incubator-tvm
UpsamplingBilinear
false
9,303
[ "Apache-2.0" ]
0
aa035f4650926f5e714b02cbab6d974f0a17352f
https://github.com/Leslie-Fang/incubator-tvm/tree/aa035f4650926f5e714b02cbab6d974f0a17352f
AcM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/pv/cpvq5akoxii6zri2lssiqurrqsl5nuea43ylpa6xzb433ugqbchi.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vd/cvdwj7vqydgw7z7brh663uauzwfegvbcydkvglc3kwgrl6q4zijb.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_1 => tanh_1 # Graph fragment: # %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/e4/ce4cl5fwehnchpxxkcnlocfcdupfyowv73biygcxglbtym6a2can.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_2 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4m/c4m3ygzhvntyb2u2ng47arhwfhv5p2yswp6d5b5ltatzm2z6f57y.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_2 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 64), (64, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf3, primals_5, 2048, grid=grid(2048), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf6, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', 
dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
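The two _softmax kernels above are the standard numerically stable split that Inductor emits for torch.softmax: the first pass subtracts the row-wise maximum before exponentiating, the second normalizes by the sum of exponentials. A minimal eager-mode sketch of the same two-pass decomposition (dim=1 on the (4, 4, 4, 4) output, which is what the x0/x2 index arithmetic walks over):

import torch

def softmax_two_pass(x: torch.Tensor, dim: int = 1) -> torch.Tensor:
    # Pass 1 (triton_poi_fused__softmax_2): subtract the max along `dim`
    # for numerical stability, then exponentiate.
    exp = torch.exp(x - x.amax(dim=dim, keepdim=True))
    # Pass 2 (triton_poi_fused__softmax_3): normalize by the sum of the
    # exponentials along the same dimension.
    return exp / exp.sum(dim=dim, keepdim=True)

x = torch.randn(4, 4, 4, 4)
assert torch.allclose(softmax_two_pass(x), torch.softmax(x, dim=1), atol=1e-6)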
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 64), (64, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), 
torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 triton_poi_fused_tanh_1[grid(2048)](buf3, primals_5, 2048, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_3[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf6, primals_6, primals_4 class AcMNew(nn.Module): def __init__(self, in_dim: 'int', ac_dim: 'int', ac_lim: 'int', discrete: 'bool'=True): super().__init__() self.ac_lim = ac_lim self.discrete = discrete self.fc1 = nn.Linear(in_dim, 64) self.fc2 = nn.Linear(64, 32) self.fc3 = nn.Linear(32, ac_dim) def act(self, obs): action = self.forward(obs) if self.discrete: action = torch.argmax(action, dim=1) return action def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
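For reference, a hypothetical smoke test for AcMNew in the style of the get_inputs/get_init_inputs helpers that accompany other records in this collection; in_dim=4 and ac_dim=4 are pinned down by the asserted (64, 4) and (4, 32) weight shapes, while ac_lim=1 is purely an illustrative placeholder:

import torch

def get_init_inputs():
    # in_dim=4 and ac_dim=4 follow from the fc1/fc3 weight shapes asserted
    # in call(); ac_lim is unused by forward(), so any value works here.
    return [[], {'in_dim': 4, 'ac_dim': 4, 'ac_lim': 1, 'discrete': True}]

def get_inputs():
    # Matches assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)).
    return [torch.rand([4, 4, 4, 4])]

# Illustrative usage (the compiled call graph pins CUDA device 0):
# model = AcMNew(4, 4, 1).cuda()
# probs = model(get_inputs()[0].cuda())  # softmax over dim=1, shape (4, 4, 4, 4)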
MIMUW-RL/spp-rl
AcM
false
17644
[ "MIT" ]
7
86b96cdd220cc4eae86f7cfd26924c69b498dcc6
https://github.com/MIMUW-RL/spp-rl/tree/86b96cdd220cc4eae86f7cfd26924c69b498dcc6
net2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fp/cfp5jrxxyxrvhcpoq5tio3p5tkhj5ugdrpyur3x4v6meatzih7jn.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/f4/cf435ljkvsbmsfqmb2jfkuq4aor2tpycnclm7llj5efimpnzxiwz.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/j3/cj3tcccv2au6fwrk7zrqv6aadd7rde25xioifxhrg3rnq277xw2z.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mv/cmvvxenutty4sz3eynxns45xagw56ec2ufreiud5s5acbv345vt5.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/gb/cgbyyfi5lll3xaoryj23lises4aplhm5a4rcgchfqja6bfmq4vbx.py # Topologically Sorted Source Nodes: [conv2d_3, x_3, x_4], Original ATen: [aten.convolution, aten.relu, aten._log_softmax, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_3 => relu_3 # x_4 => amax, sub # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%relu_3, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_3, %amax), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused__log_softmax_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused__log_softmax_convolution_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_convolution_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, 
out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp10 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (1)) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp8 = tmp5 + tmp7 tmp9 = triton_helpers.maximum(tmp3, tmp8) tmp13 = tmp10 + tmp12 tmp14 = triton_helpers.maximum(tmp3, tmp13) tmp15 = triton_helpers.maximum(tmp9, tmp14) tmp16 = tmp4 - tmp15 tmp17 = 0.0 tmp18 = tmp4 <= tmp17 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/f6/cf62mfpqwv5pcofy3ro6w6ycq3f2iwdol4c5h7ameyvaum2rwiui.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # x_4 => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_5 = async_compile.triton('triton_poi_fused__log_softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = (yindex // 2) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (2*x2) + (32*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + ((2*x2) + (32*y1)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (2*x2) + (32*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tmp0 - tmp6 tl.store(out_ptr0 + (x2 + (16*y3)), tmp7, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (2, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_3, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 4, 4), (1024, 1, 256, 64)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf4, primals_5, 16384, grid=grid(16384), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 4, 4), (2048, 1, 512, 128)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf6, primals_7, 8192, grid=grid(8192), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 2, 4, 4), (32, 1, 8, 2)) buf8 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 
2), torch.float32) buf10 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, x_3, x_4], Original ATen: [aten.convolution, aten.relu, aten._log_softmax, aten.threshold_backward] triton_poi_fused__log_softmax_convolution_relu_threshold_backward_4.run(buf7, primals_9, buf8, buf10, 128, grid=grid(128), stream=stream0) del primals_9 buf9 = reinterpret_tensor(buf7, (4, 2, 4, 4), (32, 16, 4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_5.run(buf8, buf9, 8, 16, grid=grid(8, 16), stream=stream0) del buf8 return (buf9, primals_1, buf0, primals_4, primals_6, primals_8, buf2, buf4, buf6, buf9, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((2, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
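The _log_softmax kernel pair above is the usual two-pass log-sum-exp decomposition over the channel dimension (here n_classes=2): kernel 4 fuses the conv bias add and ReLU with the channel-max subtraction (plus the threshold_backward mask for autograd), and kernel 5 finishes with x - log(sum(exp(x))). An eager-mode sketch of the same identity:

import torch
import torch.nn.functional as F

def log_softmax_two_pass(x: torch.Tensor, dim: int = 1) -> torch.Tensor:
    # Pass 1: subtract the channel-wise max (the amax/sub graph nodes).
    shifted = x - x.amax(dim=dim, keepdim=True)
    # Pass 2: subtract log(sum(exp(.))) of the shifted values.
    return shifted - shifted.exp().sum(dim=dim, keepdim=True).log()

x = torch.relu(torch.randn(4, 2, 4, 4))  # (N, C=2, H, W), as asserted for buf7
assert torch.allclose(log_softmax_two_pass(x), F.log_softmax(x, dim=1), atol=1e-6)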
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused__log_softmax_convolution_relu_threshold_backward_4(in_ptr0 , in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp10 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr1 + 1) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp8 = tmp5 + tmp7 tmp9 = triton_helpers.maximum(tmp3, tmp8) tmp13 = tmp10 + tmp12 tmp14 = triton_helpers.maximum(tmp3, tmp13) tmp15 = triton_helpers.maximum(tmp9, tmp14) tmp16 = tmp4 - tmp15 
tmp17 = 0.0 tmp18 = tmp4 <= tmp17 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp18, xmask) @triton.jit def triton_poi_fused__log_softmax_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 8 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = yindex // 2 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (2 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 2 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tmp0 - tmp6 tl.store(out_ptr0 + (x2 + 16 * y3), tmp7, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (2, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 4, 4), (1024, 1, 256, 64)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(4096)](buf2, primals_2, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_2[grid(16384)](buf4, primals_5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 4, 4), (2048, 1, 512, 128)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_3[grid(8192)](buf6, primals_7, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 2, 4, 4), (32, 1, 8, 2)) buf8 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.float32) buf10 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool) triton_poi_fused__log_softmax_convolution_relu_threshold_backward_4[ grid(128)](buf7, primals_9, buf8, buf10, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf9 = reinterpret_tensor(buf7, (4, 2, 4, 4), (32, 16, 4, 
1), 0) del buf7 triton_poi_fused__log_softmax_5[grid(8, 16)](buf8, buf9, 8, 16, XBLOCK=16, YBLOCK=8, num_warps=4, num_stages=1) del buf8 return (buf9, primals_1, buf0, primals_4, primals_6, primals_8, buf2, buf4, buf6, buf9, buf10) class net2New(nn.Module): """ """ def __init__(self, n_classes=2): super(net2New, self).__init__() self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.n_classes = n_classes self.conv1 = nn.Conv2d(4, 64, 1) self.conv2 = nn.Conv2d(64, 256, 1) self.conv3 = nn.Conv2d(256, 128, 1) self.conv4 = nn.Conv2d(128, self.n_classes, 1) self.logsoftmax = nn.LogSoftmax(dim=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
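A hypothetical smoke test for net2New, mirroring the get_inputs/get_init_inputs convention used elsewhere in this collection; the shapes come directly from the assert_size_stride calls above:

import torch

def get_init_inputs():
    # n_classes=2 matches the asserted (2, 128, 1, 1) conv4 weight.
    return [[], {'n_classes': 2}]

def get_inputs():
    # Matches assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)).
    return [torch.rand([4, 4, 4, 4])]

# Illustrative usage (CUDA required by the compiled call graph):
# model = net2New().cuda()
# logp = model(get_inputs()[0].cuda())  # log-probabilities, shape (4, 2, 4, 4)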
luisesanmartin/dwelling-recognition
net2
false
7139
[ "MIT" ]
1
b2437b64088a26746947c1c88077c96332e7b9c6
https://github.com/luisesanmartin/dwelling-recognition/tree/b2437b64088a26746947c1c88077c96332e7b9c6
qy
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/jf/cjfr4sfsrvydqmccowtorseagqzcsymbondn73vnpv23l35q756r.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %view_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp0 < tmp5 
tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + ((4*(x1 // 4)) + x0), tmp7 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = 0.0 tmp10 = tmp8 > tmp9 tmp11 = 0.01 tmp12 = tmp8 * tmp11 tmp13 = tl.where(tmp10, tmp8, tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp7, tmp13, tmp14) tmp16 = tmp0 >= tmp5 tmp17 = tmp16 & tmp4 tmp18 = tl.load(in_ptr1 + (x1), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp6, tmp15, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp4, tmp19, tmp20) tmp22 = tmp0 >= tmp3 tmp23 = tl.full([1], 6, tl.int64) tmp24 = tmp0 < tmp23 tmp25 = tl.load(in_ptr2 + (x1), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.where(tmp4, tmp21, tmp25) tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/bh/cbhiwirufn4xgjlassc2kuz5i566zrr37i3sefu2ywwzuzkthdsl.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_3 => gt_1, mul_1, where_1 # Graph fragment: # %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_5), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_2, 0.01), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_tensor_2, %mul_1), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) 
tl.store(in_out_ptr0 + (x2), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/qd/cqdja7yao6mcn2im3ot77wimzd5degbo24upnzgxqbv2njv5uffs.py # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.cos] # Source node to ATen node mapping: # x_9 => cos # Graph fragment: # %cos : [num_users=2] = call_function[target=torch.ops.aten.cos.default](args = (%addmm_3,), kwargs = {}) triton_poi_fused_cos_2 = async_compile.triton('triton_poi_fused_cos_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cos_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cos_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = tl_math.cos(tmp0) tl.store(out_ptr0 + (x0), tmp1, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vw/cvwcjxztifu7pseky77dsket6nm7izi3xqkbtqtxa6b7we5vdmtx.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # h => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.01), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_1, %mul), kwargs = {}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %where), kwargs = {}) triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr1 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (512, 6), (6, 1)) assert_size_stride(primals_5, (512, ), (1, )) assert_size_stride(primals_6, (512, 512), (512, 1)) assert_size_stride(primals_7, (512, ), (1, )) assert_size_stride(primals_8, (512, 512), (512, 1)) assert_size_stride(primals_9, (512, ), (1, )) assert_size_stride(primals_10, (256, 512), (512, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (4, 256), (256, 1)) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 6), (6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_3, primals_2, buf0, 96, grid=grid(96), stream=stream0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (6, 512), (1, 6), 0), out=buf1) del primals_4 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf2, primals_5, 8192, grid=grid(8192), stream=stream0) del primals_5 buf3 = empty_strided_cuda((16, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_6, (512, 512), (1, 512), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf4, primals_7, 8192, grid=grid(8192), stream=stream0) del primals_7 buf5 = empty_strided_cuda((16, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_8, (512, 
512), (1, 512), 0), out=buf5) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf6, primals_9, 8192, grid=grid(8192), stream=stream0) del primals_9 buf7 = empty_strided_cuda((16, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf6, reinterpret_tensor(primals_10, (512, 256), (1, 512), 0), alpha=1, beta=1, out=buf7) del primals_11 buf8 = empty_strided_cuda((16, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.cos] triton_poi_fused_cos_2.run(buf7, buf8, 4096, grid=grid(4096), stream=stream0) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf8, reinterpret_tensor(primals_12, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf9) del primals_13 # Topologically Sorted Source Nodes: [h], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_3.run(primals_1, primals_1, 16, grid=grid(16), stream=stream0) del primals_1 return (reinterpret_tensor(buf9, (16, 2), (4, 1), 0), reinterpret_tensor(buf9, (16, 2), (4, 1), 2), buf9, buf0, buf2, buf4, buf6, buf7, buf8, primals_12, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((512, 6), (6, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((512, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((512, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
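The repeated leaky_relu kernel above fuses each linear layer's bias add with LeakyReLU at the default negative_slope of 0.01 (tl.where(tmp2 > 0, tmp2, 0.01 * tmp2)), updating the mm output in place. A minimal sketch of the epilogue it implements, assuming only what the kernel loads and stores:

import torch
import torch.nn.functional as F

def fused_bias_leaky_relu(mm_out: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # Add the broadcast bias, then apply LeakyReLU with slope 0.01,
    # exactly the arithmetic in triton_poi_fused_leaky_relu_1.
    y = mm_out + bias
    return torch.where(y > 0, y, 0.01 * y)

mm_out = torch.randn(16, 512)  # shape of buf1/buf3/buf5 in call()
bias = torch.randn(512)
ref = F.leaky_relu(mm_out + bias, negative_slope=0.01)
assert torch.allclose(fused_bias_leaky_relu(mm_out, bias), ref)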
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + (4 * (x1 // 4) + x0), tmp7 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = 0.0 tmp10 = tmp8 > tmp9 tmp11 = 0.01 tmp12 = tmp8 * tmp11 tmp13 = tl.where(tmp10, tmp8, tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp7, tmp13, tmp14) tmp16 = tmp0 >= tmp5 tmp17 = tmp16 & tmp4 tmp18 = tl.load(in_ptr1 + x1, tmp17 & xmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tl.where(tmp6, tmp15, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp4, tmp19, tmp20) tmp22 = tmp0 >= tmp3 tl.full([1], 6, tl.int64) tmp25 = tl.load(in_ptr2 + x1, tmp22 & xmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tl.where(tmp4, tmp21, tmp25) tl.store(out_ptr0 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, None) @triton.jit def triton_poi_fused_cos_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl_math.cos(tmp0) tl.store(out_ptr0 + x0, tmp1, None) @triton.jit def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr1, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr1 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (512, 6), (6, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (512, 512), (512, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (512, 512), (512, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (256, 512), (512, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (4, 256), (256, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 6), (6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(96)](primals_1, primals_3, primals_2, buf0, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (6, 512), (1, 6), 0), out=buf1) del primals_4 buf2 = buf1 del buf1 triton_poi_fused_leaky_relu_1[grid(8192)](buf2, primals_5, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf3 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_6, (512, 512), ( 1, 512), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_leaky_relu_1[grid(8192)](buf4, primals_7, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_8, (512, 512), ( 1, 512), 0), out=buf5) buf6 = buf5 del buf5 triton_poi_fused_leaky_relu_1[grid(8192)](buf6, primals_9, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf7 = empty_strided_cuda((16, 256), (256, 1), torch.float32) extern_kernels.addmm(primals_11, buf6, reinterpret_tensor( primals_10, (512, 256), (1, 512), 0), alpha=1, beta=1, out=buf7) del primals_11 buf8 = empty_strided_cuda((16, 256), (256, 1), torch.float32) triton_poi_fused_cos_2[grid(4096)](buf7, buf8, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, buf8, reinterpret_tensor( primals_12, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf9) del primals_13 triton_poi_fused_leaky_relu_3[grid(16)](primals_1, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 return (reinterpret_tensor(buf9, (16, 2), (4, 1), 0), reinterpret_tensor(buf9, (16, 2), (4, 1), 2), buf9, buf0, buf2, buf4, buf6, buf7, buf8, primals_12, primals_10, primals_8, primals_6) def weights_init(m): if isinstance(m, nn.Conv2d): nn.init.kaiming_uniform_(m.weight, a=0, mode='fan_in', nonlinearity ='leaky_relu') try: nn.init.constant_(m.bias, 0.01) except: pass if isinstance(m, nn.Linear): nn.init.kaiming_uniform_(m.weight, a=0, mode='fan_in', nonlinearity ='leaky_relu') nn.init.constant_(m.bias, 0.01) class Generator(nn.Module): def __init__(self, input_dim=8, output_dim=2): super(Generator, 
self).__init__() self.linear1 = nn.Linear(input_dim, 512) self.linear2 = nn.Linear(512, 512) self.linear3 = nn.Linear(512, 512) self.linear4 = nn.Linear(512, 256) self.linear5 = nn.Linear(256, output_dim) self.apply(weights_init) def forward(self, condition, v0, t): x = torch.cat([condition, v0], dim=1) x = torch.cat([x, t], dim=1) x = self.linear1(x) x = F.leaky_relu(x, inplace=True) x = self.linear2(x) x = F.leaky_relu(x, inplace=True) x = self.linear3(x) x = F.leaky_relu(x, inplace=True) x = self.linear4(x) x = torch.cos(x) x = self.linear5(x) return x class qyNew(nn.Module): def __init__(self, zy_dim): super(qyNew, self).__init__() self.trajectory_generation = Generator(input_dim=1 + 1 + zy_dim, output_dim=4) def forward(self, input_0, input_1, input_2): primals_4 = self.trajectory_generation.linear1.weight primals_5 = self.trajectory_generation.linear1.bias primals_6 = self.trajectory_generation.linear2.weight primals_7 = self.trajectory_generation.linear2.bias primals_8 = self.trajectory_generation.linear3.weight primals_9 = self.trajectory_generation.linear3.bias primals_10 = self.trajectory_generation.linear4.weight primals_11 = self.trajectory_generation.linear4.bias primals_12 = self.trajectory_generation.linear5.weight primals_13 = self.trajectory_generation.linear5.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1]
IamWangYunKai/DG-TrajGen
qy
false
8303
[ "MIT" ]
31
0a8aab7e1c05111a5afe43d53801c55942e9ff56
https://github.com/IamWangYunKai/DG-TrajGen/tree/0a8aab7e1c05111a5afe43d53801c55942e9ff56
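The record above traces qy's trajectory_generation Generator (five linear layers with leaky-ReLU activations and a cosine before the output layer) into one fused concat kernel, a reused leaky-ReLU kernel, a cos kernel, and cuBLAS mm/addmm calls. A minimal smoke-test sketch, assuming a CUDA device and the record's classes in scope; the (4, 4) input shapes and zy_dim=4 are read off the assert_size_stride guards in call() (Generator input width 1 + 1 + zy_dim = 6):

import torch

model = qyNew(zy_dim=4).cuda()
# Argument names mirror Generator.forward and are illustrative only;
# the compiled graph pins each input to shape (4, 4).
condition = torch.rand(4, 4, device='cuda')
v0 = torch.rand(4, 4, device='cuda')
t = torch.rand(4, 4, device='cuda')
out_a, out_b = model(condition, v0, t)
print(out_a.shape, out_b.shape)  # two (16, 2) views into one (16, 4) buffer
# Caution: call() also runs triton_poi_fused_leaky_relu_3 over the first
# input in place (the fused 'h' node), so `condition` is mutated here.

Beyond that in-place mutation, presumably inherited from an inplace leaky_relu in the original module, the wrapper behaves like an ordinary nn.Module.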
LabelPredictor
import torch from torch import nn class LabelPredictor(nn.Module): def __init__(self, nz_feat, classify_rot=True): super(LabelPredictor, self).__init__() self.pred_layer = nn.Linear(nz_feat, 1) def forward(self, feat): pred = self.pred_layer.forward(feat) pred = torch.sigmoid(pred) return pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nz_feat': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1 class LabelPredictorNew(nn.Module): def __init__(self, nz_feat, classify_rot=True): super(LabelPredictorNew, self).__init__() self.pred_layer = nn.Linear(nz_feat, 1) def forward(self, input_0): primals_1 = self.pred_layer.weight primals_2 = self.pred_layer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
JasonQSY/Associative3D
LabelPredictor
false
8359
[ "MIT" ]
25
c50818b593ec48c38ed7ee3e109c23531089da32
https://github.com/JasonQSY/Associative3D/tree/c50818b593ec48c38ed7ee3e109c23531089da32
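Since LabelPredictorNew routes its nn.Linear parameters through call(), it should be numerically interchangeable with the eager module. A parity-check sketch using the record's own helpers (a CUDA device is required because call() allocates CUDA buffers):

import torch

args, kwargs = get_init_inputs()
eager = LabelPredictor(*args, **kwargs).cuda()
compiled = LabelPredictorNew(*args, **kwargs).cuda()
compiled.load_state_dict(eager.state_dict())  # share one set of weights
x = get_inputs()[0].cuda()
torch.testing.assert_close(eager(x), compiled(x))

The fused kernel folds the bias add and the sigmoid into a single pass over the (64, 1) matmul result, so only the mm itself goes through extern_kernels.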
SequentialCNNNet
import torch import torch.nn as nn class SequentialCNNNet(nn.Module): def __init__(self): super(SequentialCNNNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(64, 128, 5) self.fc1 = nn.Linear(128 * 5 * 5, 1024) self.fc2 = nn.Linear(1024, 84) self.fc3 = nn.Linear(84, 50) self.features = nn.Sequential(self.conv1, nn.ReLU(), self.pool, self.conv2, nn.ReLU(), self.pool) self.classifier = nn.Sequential(self.fc1, self.fc2, self.fc3) def forward(self, x): x = self.features(x) x = x.view(-1, 128 * 5 * 5) x = self.classifier(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y3), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 3072 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 14 x2 = xindex // 896 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 3584 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 3584 * x2), xmask) tmp3 = tl.load(in_ptr0 + (1792 + x0 + 128 * x1 + 3584 * x2), xmask) tmp5 = tl.load(in_ptr0 + (1856 + x0 + 128 * x1 + 3584 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, 
tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 100 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 5 y1 = yindex // 5 y5 = yindex y4 = yindex // 25 y6 = yindex % 25 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1280 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1408 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 25 * x2 + 3200 * y4), tmp16, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (1024, 3200), (3200, 1)) assert_size_stride(primals_7, (1024,), (1,)) assert_size_stride(primals_8, (84, 1024), (1024, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (50, 84), (84, 1)) assert_size_stride(primals_11, (50,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 25)](primals_1, buf0, 192, 25, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 32, 32), (3072, 1, 96, 3), torch. 
float32) triton_poi_fused_1[grid(12, 1024)](primals_3, buf1, 12, 1024, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_2[grid(8192, 25)](primals_4, buf2, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 64, 28, 28), (50176, 1, 1792, 64)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_3[grid(200704)](buf4, primals_2, 200704, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf5 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.float32) buf6 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_4[grid(50176)](buf4, buf5, buf6, 50176, XBLOCK=512, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 10, 10), (12800, 1, 1280, 128)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_5[grid(51200)](buf8, primals_5, 51200, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128), torch.int8) buf10 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch. float32) triton_poi_fused_max_pool2d_with_indices_6[grid(100, 128)](buf8, buf9, buf10, 100, 128, XBLOCK=128, YBLOCK=2, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0), reinterpret_tensor(primals_6, (3200, 1024), (1, 3200), 0), alpha=1, beta=1, out=buf11) del primals_7 buf12 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8, (1024, 84), (1, 1024), 0), alpha=1, beta=1, out=buf12) del primals_9 buf13 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.addmm(primals_11, buf12, reinterpret_tensor( primals_10, (84, 50), (1, 84), 0), alpha=1, beta=1, out=buf13) del primals_11 return (buf13, buf0, buf1, buf2, buf4, buf5, buf6, buf8, buf9, reinterpret_tensor(buf10, (4, 3200), (3200, 1), 0), buf11, buf12, primals_10, primals_8, primals_6) class SequentialCNNNetNew(nn.Module): def __init__(self): super(SequentialCNNNetNew, self).__init__() self.conv1 = nn.Conv2d(3, 64, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(64, 128, 5) self.fc1 = nn.Linear(128 * 5 * 5, 1024) self.fc2 = nn.Linear(1024, 84) self.fc3 = nn.Linear(84, 50) self.features = nn.Sequential(self.conv1, nn.ReLU(), self.pool, self.conv2, nn.ReLU(), self.pool) self.classifier = nn.Sequential(self.fc1, self.fc2, self.fc3) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
fangkaimin/pytorch_classification_new
SequentialCNNNet
false
10122
[ "MIT" ]
0
21032e7ab91f0f3106ba07aa97657a023b1cc717
https://github.com/fangkaimin/pytorch_classification_new/tree/21032e7ab91f0f3106ba07aa97657a023b1cc717
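The same parity-check pattern applies here; tolerances are loosened a little since the conv/pool/addmm chain is scheduled differently than in eager mode (CUDA assumed):

import torch

eager = SequentialCNNNet().cuda()
compiled = SequentialCNNNetNew().cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 3, 32, 32, device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)

Note that the classifier stacks fc1, fc2 and fc3 with no activation in between, so Inductor lowers it to three back-to-back addmm calls (buf11 through buf13) with nothing to fuse.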
ConvNorm
import torch import torch.utils.data class ConvNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'): super(ConvNorm, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init. calculate_gain(w_init_gain)) def forward(self, signal): conv_signal = self.conv(signal) return conv_signal def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0) class ConvNormNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'): super(ConvNormNew, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init. calculate_gain(w_init_gain)) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AeroXi/Tacotron2-Mandarin
ConvNorm
false
13246
[ "MIT" ]
67
b7bc213d1c1a9c3e2f2e11f69f586c2582010668
https://github.com/AeroXi/Tacotron2-Mandarin/tree/b7bc213d1c1a9c3e2f2e11f69f586c2582010668
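One detail worth noticing in this record: the traced input is a bare (4, 4) tensor, which call() reinterprets as a (1, 4, 4) batch for the 1-D convolution and views back to (4, 4) on the way out. A usage sketch under that unbatched calling convention (CUDA assumed):

import torch

model = ConvNormNew(in_channels=4, out_channels=4).cuda()
signal = torch.rand(4, 4, device='cuda')  # (channels, length); a batched (N, C, L) input would trip the shape guard
out = model(signal)
print(out.shape)  # (4, 4)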
NasPathBranch
import torch import torch.nn as nn import torch.utils.data def conv1x1(in_channels, out_channels, stride=1, bias=False): """ Convolution 1x1 layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. """ return nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, bias=bias) def nasnet_avgpool1x1_s2(): """ NASNet specific 1x1 Average pooling layer with stride 2. """ return nn.AvgPool2d(kernel_size=1, stride=2, count_include_pad=False) class NasPathBranch(nn.Module): """ NASNet specific `path` branch (auxiliary block). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. """ def __init__(self, in_channels, out_channels, extra_padding=False): super(NasPathBranch, self).__init__() self.extra_padding = extra_padding self.avgpool = nasnet_avgpool1x1_s2() self.conv = conv1x1(in_channels=in_channels, out_channels=out_channels) if self.extra_padding: self.pad = nn.ZeroPad2d(padding=(0, 1, 0, 1)) def forward(self, x): if self.extra_padding: x = self.pad(x) x = x[:, :, 1:, 1:].contiguous() x = self.avgpool(x) x = self.conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(64)](primals_1, buf0, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) return buf1, primals_2, buf0 def conv1x1(in_channels, out_channels, stride=1, bias=False): """ Convolution 1x1 layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. """ return nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, bias=bias) def nasnet_avgpool1x1_s2(): """ NASNet specific 1x1 Average pooling layer with stride 2. """ return nn.AvgPool2d(kernel_size=1, stride=2, count_include_pad=False) class NasPathBranchNew(nn.Module): """ NASNet specific `path` branch (auxiliary block). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. """ def __init__(self, in_channels, out_channels, extra_padding=False): super(NasPathBranchNew, self).__init__() self.extra_padding = extra_padding self.avgpool = nasnet_avgpool1x1_s2() self.conv = conv1x1(in_channels=in_channels, out_channels=out_channels) if self.extra_padding: self.pad = nn.ZeroPad2d(padding=(0, 1, 0, 1)) def forward(self, input_0): primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
earhian/imgclsmob
NasPathBranch
false
6628
[ "MIT" ]
1
c87c0942420876941868c016211073dec4392e4d
https://github.com/earhian/imgclsmob/tree/c87c0942420876941868c016211073dec4392e4d
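A kernel-size-1 AvgPool2d with stride 2 averages exactly one element, so the pooling stage is pure subsampling; triton_poi_fused_avg_pool2d_0 accordingly just copies every other row and column (the tmp0 * 1.0 is the degenerate average) before the 1x1 convolution. That makes the branch easy to cross-check against strided slicing (CUDA assumed):

import torch

branch = NasPathBranchNew(in_channels=4, out_channels=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = branch(x)                          # (4, 4, 2, 2)
ref = branch.conv(x[:, :, ::2, ::2])   # subsample, then the same 1x1 conv
torch.testing.assert_close(y, ref)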
ConvAE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/td/ctdv3m5a33kovvtng5iilth4k6mtnyfcota6hhwoiqm34iumu7wi.py # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # input_1 => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = 
tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/i3/ci3nuuurbsrmcufle642yc7udhwn4itsu6aptfssij5nzrnylpne.py # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_2 => convolution # input_3 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/my/cmykveslman44cmgx6n67zhhjovkcupjikam4mujghfvxablzzpx.py # Topologically Sorted Source Nodes: [input_4, input_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_4 => convolution_1 # input_6 => relu_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%slice_4,), kwargs = {}) 
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %relu_1, 3, -3, 8), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_1, %slice_scatter_default, 2, -3, 9), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x4 = xindex x2 = (xindex // 36) % 4 tmp19 = tl.load(in_out_ptr0 + (x4), xmask) tmp20 = tl.load(in_ptr0 + (x2), xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = x0 tmp4 = tmp3 >= tmp1 tmp5 = tmp4 & tmp2 tmp6 = tl.load(in_out_ptr0 + (x4), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr0 + (x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp5, tmp10, tmp11) tmp13 = tl.load(in_out_ptr0 + (x4), tmp2 & xmask, other=0.0) tmp14 = tl.load(in_ptr0 + (x2), tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.where(tmp4, tmp12, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp2, tmp16, tmp17) tmp21 = tmp19 + tmp20 tmp22 = tl.where(tmp2, tmp18, tmp21) tl.store(in_out_ptr0 + (x4), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/xy/cxy7c463ocont2eh2rauxmeskuwjwuxxuxekyemvjiwq562uy7br.py # Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward] # Source node to ATen node mapping: # Graph fragment: # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%slice_27, 0), kwargs = {}) triton_poi_fused_threshold_backward_3 = async_compile.triton('triton_poi_fused_threshold_backward_3', 
''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_threshold_backward_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) % 3 x2 = (xindex // 9) x3 = xindex tmp0 = tl.load(in_ptr0 + (21 + x0 + (6*x1) + (36*x2)), xmask) tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 6, 6), (144, 36, 6, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [input_4, input_6], Original ATen: [aten.convolution, 
aten.relu] triton_poi_fused_convolution_relu_2.run(buf4, primals_5, 576, grid=grid(576), stream=stream0) del primals_5 buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool) # Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward] triton_poi_fused_threshold_backward_3.run(buf4, buf5, 144, grid=grid(144), stream=stream0) return (reinterpret_tensor(buf4, (4, 4, 3, 3), (144, 36, 6, 1), 21), primals_2, primals_4, buf0, buf2, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x4 = xindex x2 = xindex // 36 % 4 tmp19 = tl.load(in_out_ptr0 + x4, xmask) tmp20 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = x0 tmp4 = tmp3 >= tmp1 tmp5 = tmp4 & tmp2 tmp6 = tl.load(in_out_ptr0 + x4, tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr0 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp5, tmp10, tmp11) tmp13 = tl.load(in_out_ptr0 + x4, tmp2 & xmask, other=0.0) tmp14 = tl.load(in_ptr0 + x2, tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.where(tmp4, tmp12, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp2, tmp16, tmp17) tmp21 = tmp19 + tmp20 tmp22 = tl.where(tmp2, tmp18, tmp21) tl.store(in_out_ptr0 + x4, tmp22, xmask) @triton.jit def triton_poi_fused_threshold_backward_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 3 x2 = xindex // 9 x3 = xindex tmp0 = tl.load(in_ptr0 + (21 + x0 + 6 * x1 + 36 * x2), xmask) tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 6, 6), (144, 36, 6, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_2[grid(576)](buf4, primals_5, 576, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool) triton_poi_fused_threshold_backward_3[grid(144)](buf4, buf5, 144, XBLOCK=128, num_warps=4, num_stages=1) return reinterpret_tensor(buf4, (4, 4, 3, 3), (144, 36, 6, 1), 21 ), primals_2, primals_4, buf0, buf2, buf5 class Conv2dSamePad(nn.Module): """ Implement Tensorflow's 'SAME' padding mode in Conv2d. When an odd number, say `m`, of pixels are need to pad, Tensorflow will pad one more column at right or one more row at bottom. But Pytorch will pad `m+1` pixels, i.e., Pytorch always pads in both sides. So we can pad the tensor in the way of Tensorflow before call the Conv2d module. """ def __init__(self, kernel_size, stride): super(Conv2dSamePad, self).__init__() self.kernel_size = kernel_size if type(kernel_size) in [list, tuple ] else [kernel_size, kernel_size] self.stride = stride if type(stride) in [list, tuple] else [stride, stride] def forward(self, x): in_height = x.size(2) in_width = x.size(3) out_height = math.ceil(float(in_height) / float(self.stride[0])) out_width = math.ceil(float(in_width) / float(self.stride[1])) pad_along_height = (out_height - 1) * self.stride[0 ] + self.kernel_size[0] - in_height pad_along_width = (out_width - 1) * self.stride[1] + self.kernel_size[1 ] - in_width pad_top = math.floor(pad_along_height / 2) pad_left = math.floor(pad_along_width / 2) pad_bottom = pad_along_height - pad_top pad_right = pad_along_width - pad_left return F.pad(x, [pad_left, pad_right, pad_top, pad_bottom], 'constant', 0) class ConvTranspose2dSamePad(nn.Module): """ This module implements the "SAME" padding mode for ConvTranspose2d as in Tensorflow. A tensor with width w_in, feed it to ConvTranspose2d(ci, co, kernel, stride), the width of output tensor T_nopad: w_nopad = (w_in - 1) * stride + kernel If we use padding, i.e., ConvTranspose2d(ci, co, kernel, stride, padding, output_padding), the width of T_pad: w_pad = (w_in - 1) * stride + kernel - (2*padding - output_padding) = w_nopad - (2*padding - output_padding) Yes, in ConvTranspose2d, more padding, the resulting tensor is smaller, i.e., the padding is actually deleting row/col. If `pad`=(2*padding - output_padding) is odd, Pytorch deletes more columns in the left, i.e., the first ceil(pad/2) and last `pad - ceil(pad/2)` columns of T_nopad are deleted to get T_pad. 
In contrast, Tensorflow deletes more columns in the right, i.e., the first floor(pad/2) and last `pad - floor(pad/2)` columns are deleted. For the height, Pytorch deletes more rows at top, while Tensorflow at bottom. In practice, we usually want `w_pad = w_in * stride` or `w_pad = w_in * stride - 1`, i.e., the "SAME" padding mode in Tensorflow. To determine the value of `w_pad`, we should pass it to this function. So the number of columns to delete: pad = 2*padding - output_padding = w_nopad - w_pad If pad is even, we can directly set padding=pad/2 and output_padding=0 in ConvTranspose2d. If pad is odd, we can use ConvTranspose2d to get T_nopad, and then delete `pad` rows/columns by ourselves. This module should be called after the ConvTranspose2d module with shared kernel_size and stride values. """ def __init__(self, output_size): super(ConvTranspose2dSamePad, self).__init__() self.output_size = output_size def forward(self, x): in_height = x.size(2) in_width = x.size(3) pad_height = in_height - self.output_size[0] pad_width = in_width - self.output_size[1] pad_top = pad_height // 2 pad_bottom = pad_height - pad_top pad_left = pad_width // 2 pad_right = pad_width - pad_left return x[:, :, pad_top:in_height - pad_bottom, pad_left:in_width - pad_right] class ConvAENew(nn.Module): def __init__(self, channels, kernels): """ :param channels: a list containing all channels including the input image channel (1 for gray, 3 for RGB) :param kernels: a list containing all kernel sizes, it should satisfy: len(kernels) = len(channels) - 1. """ super(ConvAENew, self).__init__() assert isinstance(channels, list) and isinstance(kernels, list) self.encoder = nn.Sequential() for i in range(1, len(channels)): self.encoder.add_module('pad%d' % i, Conv2dSamePad(kernels[i - 1], 2)) self.encoder.add_module('conv%d' % i, nn.Conv2d(channels[i - 1], channels[i], kernel_size=kernels[i - 1], stride=2)) self.encoder.add_module('relu%d' % i, nn.ReLU(True)) self.decoder = nn.Sequential() channels = list(reversed(channels)) kernels = list(reversed(kernels)) sizes = [[12, 11], [24, 21], [48, 42]] for i in range(len(channels) - 1): self.decoder.add_module('deconv%d' % (i + 1), nn. ConvTranspose2d(channels[i], channels[i + 1], kernel_size= kernels[i], stride=2)) self.decoder.add_module('padd%d' % i, ConvTranspose2dSamePad( sizes[i])) self.decoder.add_module('relud%d' % i, nn.ReLU(True)) def forward(self, input_0): primals_1 = self.encoder.conv1.weight primals_3 = self.encoder.conv1.bias primals_2 = self.decoder.deconv1.weight primals_5 = self.decoder.deconv1.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
qilinli/DSC-Net
ConvAE
false
4154
[ "MIT" ]
0
c0e7a3cae3e07c34b2989234f568c7007cf0fc55
https://github.com/qilinli/DSC-Net/tree/c0e7a3cae3e07c34b2989234f568c7007cf0fc55
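Two things stand out in this record. First, the guard shapes suggest the trace was taken with a single-stage autoencoder, channels=[4, 4] and kernels=[4] (inferred from the 4x4x4x4 weights and the stride-2 SAME padding; treat that as an assumption). Second, ConvAENew.forward appears to feed call() in a scrambled order: per the AOT comments, the trace pads primals_1 as the input and uses primals_2/primals_4 as the conv and deconv weights, yet the wrapper binds primals_1 to conv1.weight and primals_4 to input_0; every tensor involved is 4x4x4x4, so the shape guards cannot catch the swap, but the result will not match the eager ConvAE numerically. The sketch below therefore only exercises shapes:

import torch

# channels/kernels inferred from the assert_size_stride guards, not stated in the record
ae = ConvAENew(channels=[4, 4], kernels=[4]).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = ae(x)
print(y.shape)  # (4, 4, 3, 3): the hardcoded sizes[0] = [12, 11] gives ConvTranspose2dSamePad
                # negative padding, cropping the 6x6 deconv output down to 3x3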
CapsuleLoss
import torch import torch.nn as nn import torch.nn.functional as F class MarginLoss(nn.Module): def __init__(self, size_average=False, loss_lambda=0.5): """ Margin loss for digit existence Eq. (4): L_k = T_k * max(0, m+ - ||v_k||)^2 + lambda * (1 - T_k) * max(0, ||v_k|| - m-)^2 Args: size_average: should the losses be averaged (True) or summed (False) over observations for each minibatch. loss_lambda: parameter for down-weighting the loss for missing digits """ super(MarginLoss, self).__init__() self.size_average = size_average self.m_plus = 0.9 self.m_minus = 0.1 self.loss_lambda = loss_lambda def forward(self, inputs, labels): L_k = labels * F.relu(self.m_plus - inputs) ** 2 + self.loss_lambda * ( 1 - labels) * F.relu(inputs - self.m_minus) ** 2 L_k = L_k.sum(dim=1) if self.size_average: return L_k.mean() else: return L_k.sum() class CapsuleLoss(nn.Module): def __init__(self, loss_lambda=0.5, recon_loss_scale=0.0005, size_average=False): """ Combined margin loss and reconstruction loss. Margin loss see above. Sum squared error (SSE) was used as a reconstruction loss. Args: recon_loss_scale: param for scaling down the the reconstruction loss size_average: if True, reconstruction loss becomes MSE instead of SSE """ super(CapsuleLoss, self).__init__() self.size_average = size_average self.margin_loss = MarginLoss(size_average=size_average, loss_lambda=loss_lambda) self.reconstruction_loss = nn.MSELoss(size_average=size_average) self.recon_loss_scale = recon_loss_scale def forward(self, inputs, labels, images, reconstructions): margin_loss = self.margin_loss(inputs, labels) reconstruction_loss = self.reconstruction_loss(reconstructions, images) caps_loss = margin_loss + self.recon_loss_scale * reconstruction_loss return caps_loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp18 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp19 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp32 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp33 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp46 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp47 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp2 = 0.9 tmp3 = tmp2 - tmp1 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp5 * tmp5 tmp7 = tmp0 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp0 tmp10 = 0.5 tmp11 = tmp9 * tmp10 tmp12 = 0.1 tmp13 = tmp1 - tmp12 tmp14 = triton_helpers.maximum(tmp4, tmp13) tmp15 = tmp14 * tmp14 tmp16 = tmp11 * tmp15 tmp17 = tmp7 + tmp16 tmp20 = tmp2 - tmp19 tmp21 = triton_helpers.maximum(tmp4, tmp20) tmp22 = tmp21 * tmp21 tmp23 = tmp18 * tmp22 tmp24 = tmp8 - tmp18 tmp25 = tmp24 * tmp10 tmp26 = tmp19 - tmp12 tmp27 = triton_helpers.maximum(tmp4, tmp26) tmp28 = tmp27 * tmp27 tmp29 = tmp25 * tmp28 tmp30 = tmp23 + tmp29 tmp31 = tmp17 + tmp30 tmp34 = tmp2 - tmp33 tmp35 = triton_helpers.maximum(tmp4, tmp34) tmp36 = tmp35 * tmp35 tmp37 = tmp32 * tmp36 tmp38 = tmp8 - tmp32 tmp39 = tmp38 * tmp10 tmp40 = tmp33 - tmp12 tmp41 = triton_helpers.maximum(tmp4, tmp40) tmp42 = tmp41 * tmp41 tmp43 = tmp39 * tmp42 tmp44 = tmp37 + tmp43 tmp45 = tmp31 + tmp44 tmp48 = tmp2 - tmp47 tmp49 = triton_helpers.maximum(tmp4, tmp48) tmp50 = tmp49 * tmp49 tmp51 = tmp46 * tmp50 tmp52 = tmp8 - tmp46 tmp53 = tmp52 * tmp10 tmp54 = tmp47 - tmp12 tmp55 = triton_helpers.maximum(tmp4, tmp54) tmp56 = tmp55 * tmp55 tmp57 = tmp53 * tmp56 tmp58 = tmp51 + tmp57 tmp59 = tmp45 + tmp58 tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK]) tmp62 = tl.sum(tmp60, 1)[:, None] tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp62, None) @triton.jit def triton_per_fused_add_mse_loss_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp7 = tl.load(in_out_ptr0 + 0) tmp8 = tl.broadcast_to(tmp7, [1]) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp9 = 0.0005 tmp10 = tmp6 * tmp9 tmp11 = tmp8 + tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0[grid(1)](arg1_1, arg0_1, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = buf1 del buf1 triton_per_fused_add_mse_loss_mul_1[grid(1)](buf3, arg3_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg2_1 del arg3_1 return buf3, class MarginLoss(nn.Module): def __init__(self, size_average=False, loss_lambda=0.5): """ Margin loss for digit existence Eq. (4): L_k = T_k * max(0, m+ - ||v_k||)^2 + lambda * (1 - T_k) * max(0, ||v_k|| - m-)^2 Args: size_average: should the losses be averaged (True) or summed (False) over observations for each minibatch. loss_lambda: parameter for down-weighting the loss for missing digits """ super(MarginLoss, self).__init__() self.size_average = size_average self.m_plus = 0.9 self.m_minus = 0.1 self.loss_lambda = loss_lambda def forward(self, inputs, labels): L_k = labels * F.relu(self.m_plus - inputs) ** 2 + self.loss_lambda * (1 - labels) * F.relu(inputs - self.m_minus) ** 2 L_k = L_k.sum(dim=1) if self.size_average: return L_k.mean() else: return L_k.sum() class CapsuleLossNew(nn.Module): def __init__(self, loss_lambda=0.5, recon_loss_scale=0.0005, size_average=False): """ Combined margin loss and reconstruction loss. For the margin loss, see above. Sum squared error (SSE) was used as the reconstruction loss. Args: recon_loss_scale: param for scaling down the reconstruction loss size_average: if True, reconstruction loss becomes MSE instead of SSE """ super(CapsuleLossNew, self).__init__() self.size_average = size_average self.margin_loss = MarginLoss(size_average=size_average, loss_lambda=loss_lambda) self.reconstruction_loss = nn.MSELoss(size_average=size_average) self.recon_loss_scale = recon_loss_scale def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
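A minimal usage sketch for the compiled loss above (my addition, not part of the source record; assumes a CUDA device). All four inputs must be float32 tensors of shape (4, 4, 4, 4) to satisfy the assert_size_stride guards, and the argument order is inferred from the kernels: input_0 carries the capsule lengths ||v_k||, input_1 the labels T_k, and input_2/input_3 the pair fed to the squared-error term.

import torch

# hypothetical data; shapes match the traced guards above
loss_fn = CapsuleLossNew(loss_lambda=0.5, recon_loss_scale=0.0005)
lengths = torch.rand(4, 4, 4, 4, device='cuda')                     # ||v_k||
labels = torch.randint(0, 2, (4, 4, 4, 4), device='cuda').float()   # T_k
recon = torch.rand(4, 4, 4, 4, device='cuda')
images = torch.rand(4, 4, 4, 4, device='cuda')
total = loss_fn(lengths, labels, recon, images)  # 0-dim tensor: margin loss + 0.0005 * SSE

Note how the first kernel folds the entire margin expression (both max(0, .)^2 branches, the lambda weighting, and the sum) into one reduction, and the second kernel adds the scaled squared-error sum in place, so the Python-side forward is a single call() dispatch.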
richardsun-voyager/capsule-network
CapsuleLoss
false
7,554
[ "MIT" ]
1
349cec1caa9ab95ff4b3333c33d04b1bdb442f67
https://github.com/richardsun-voyager/capsule-network/tree/349cec1caa9ab95ff4b3333c33d04b1bdb442f67
Vgg16
import torch from torch import nn import torch.nn.functional as F class Vgg16(nn.Module): def __init__(self): super(Vgg16, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) def forward(self, X): h = F.relu(self.conv1_1(X), inplace=True) h = F.relu(self.conv1_2(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv2_1(h), inplace=True) h = F.relu(self.conv2_2(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv3_1(h), inplace=True) h = F.relu(self.conv3_2(h), inplace=True) h = F.relu(self.conv3_3(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv4_1(h), inplace=True) h = F.relu(self.conv4_2(h), inplace=True) h = F.relu(self.conv4_3(h), inplace=True) h = F.relu(self.conv5_1(h), inplace=True) h = F.relu(self.conv5_2(h), inplace=True) h = F.relu(self.conv5_3(h), inplace=True) relu5_3 = h return relu5_3 def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) 
tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 8 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 
3), (576, 1, 192, 64), torch. float32) triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_9[grid(1048576)](buf15, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_9[grid(1048576)](buf17, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(262144)](buf17, buf18, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf18, buf3, 
stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_11[grid(524288)](buf21, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_11[grid(524288)](buf23, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_12[grid(131072)](buf23, buf24, buf25, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_13[grid(262144)](buf27, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_13[grid(262144)](buf29, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_13[grid(262144)](buf31, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_14[grid(65536)](buf31, buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_15[grid(131072)](buf35, primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_15[grid(131072)](buf37, primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_15[grid(131072)](buf39, primals_21, 131072, XBLOCK=1024, num_warps=4, 
num_stages=1) del primals_21 buf40 = extern_kernels.convolution(buf39, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf41 = buf40 del buf40 triton_poi_fused_convolution_relu_15[grid(131072)](buf41, primals_23, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf42 = extern_kernels.convolution(buf41, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf43 = buf42 del buf42 triton_poi_fused_convolution_relu_15[grid(131072)](buf43, primals_25, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf44 = extern_kernels.convolution(buf43, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf45 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) buf46 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_16[grid(2048, 64) ](buf44, primals_27, buf45, buf46, 2048, 64, XBLOCK=32, YBLOCK= 32, num_warps=4, num_stages=1) del buf44 del primals_27 return (buf45, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf41, buf43, buf46) class Vgg16New(nn.Module): def __init__(self): super(Vgg16New, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight primals_13 = self.conv3_2.bias primals_14 = self.conv3_3.weight primals_15 = self.conv3_3.bias primals_16 = self.conv4_1.weight primals_17 = self.conv4_1.bias primals_18 = self.conv4_2.weight primals_19 = self.conv4_2.bias primals_20 = self.conv4_3.weight primals_21 = self.conv4_3.bias primals_22 = self.conv5_1.weight primals_23 = self.conv5_1.bias primals_24 = self.conv5_2.weight primals_25 = self.conv5_2.bias primals_26 = self.conv5_3.weight primals_27 = self.conv5_3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
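For reference, a hypothetical way to exercise the compiled backbone (my addition; assumes CUDA, and the weights are random unless you load pretrained VGG16 parameters yourself):

import torch

model = Vgg16New().cuda().eval()
x = torch.rand(4, 3, 64, 64, device='cuda')   # matches get_inputs() in the original module
with torch.no_grad():
    relu5_3 = model(x)                        # (4, 512, 8, 8) feature map

The call() graph permutes every weight into channels-last layout once per forward (triton_poi_fused_0 through _8), runs the convolutions through extern_kernels.convolution, and fuses each bias add with its ReLU, returning only the relu5_3 activation, just like the eager module.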
YueZHOU0926/MUNIT_3D
Vgg16
false
12,055
[ "MIT" ]
0
5cb22b5f3cb127d5b2c4eea038254a7881bab372
https://github.com/YueZHOU0926/MUNIT_3D/tree/5cb22b5f3cb127d5b2c4eea038254a7881bab372
WScaleLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/ow/cowfee35kpe4fozqjkhe365j7x54f6lbx5ziwwqb3k3xg2ha4t5j.py # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # x => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class WScaleLayerNew(nn.Module): def __init__(self, size): super(WScaleLayerNew, self).__init__() self.scale = nn.Parameter(torch.randn([1])) self.b = nn.Parameter(torch.randn(size)) self.size = size def forward(self, input_0): primals_2 = self.scale primals_3 = self.b primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
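A small usage sketch (my addition, assuming CUDA): the layer computes input * scale + b, with the scalar scale broadcast everywhere and b broadcast along the channel dimension, which the single fused kernel performs in one elementwise pass.

import torch

layer = WScaleLayerNew(size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = layer(x)   # same shape as x
# reference check against the eager formula
assert torch.allclose(y, x * layer.scale + layer.b.view(1, 4, 1, 1))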
ChandreyeeB/Blind-Image-Deconvolution-using-Deep-Generative-Priors
WScaleLayer
false
7,848
[ "MIT" ]
24
4198bd2d325a32ffc4e714c486540e63440ab110
https://github.com/ChandreyeeB/Blind-Image-Deconvolution-using-Deep-Generative-Priors/tree/4198bd2d325a32ffc4e714c486540e63440ab110
ChannelAttention_avg
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] # Source node to ATen node mapping: # adaptive_avg_pool2d => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex 
x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/zy/czyrimvsfdgehav6rdabpeydp2eeevjo6pbh4uvgqw77qulgvho5.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/of/cofcgw47ymoiqc63s7satzahuexl2awjmsq4wmifftjukyl7ayjq.py # Topologically Sorted Source Nodes: [y_sum], Original ATen: [aten.cat] # Source node to ATen node mapping: # y_sum => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%expand, %expand_1], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tmp10 = tl.full([1], 64, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.load(in_ptr0 + (1 + (2*x1)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.sigmoid(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (2, 8, 1, 1), (8, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, 32, grid=grid(32), stream=stream0) # Topologically Sorted Source Nodes: [avg_out], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 2, 1, 1), (2, 1, 1, 1)) buf5 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [y_sum], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0) return (buf5, primals_2, primals_3, buf1, buf3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from 
torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 2 * x1, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp12 = tl.load(in_ptr0 + (1 + 2 * x1), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp13 = tl.sigmoid(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (2, 8, 1, 1), (8, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(32)](buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 2, 1, 1), (2, 1, 1, 1)) buf5 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.float32) 
triton_poi_fused_cat_2[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf5, primals_2, primals_3, buf1, buf3, buf4 class ChannelAttention_avgNew(nn.Module): def __init__(self, in_planes, ratio=8): super(ChannelAttention_avgNew, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d(in_planes, ratio, 1, bias=False) self.relu1 = nn.ReLU() self.fc2 = nn.Conv2d(ratio, 2, 1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
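A hypothetical invocation (my addition; assumes CUDA). Reading the cat kernel for the traced configuration: fc2 produces 2 channels per sample, each is passed through a sigmoid and expanded to 32 copies, and the two halves are concatenated, so the module emits a (4, 64, 1, 1) gate.

import torch

att = ChannelAttention_avgNew(in_planes=4, ratio=8).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
gate = att(x)                       # values in (0, 1)
assert gate.shape == (4, 64, 1, 1)  # two sigmoid gates, each broadcast to 32 channels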
HT-hlf/mmdetection_miner-2.22.0
ChannelAttention_avg
false
2,318
[ "Apache-2.0" ]
0
76eb94d6547f9f95cd58f41bb5c91941e82322b9
https://github.com/HT-hlf/mmdetection_miner-2.22.0/tree/76eb94d6547f9f95cd58f41bb5c91941e82322b9
LN_self
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/yw/cywoaswdwrpxhidv5azgjhhzgbcbci44yowv4c42f5bnzwh5u3sl.py # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, X, mul, add_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # X => div # add => add # add_1 => add_1 # mul => mul # sqrt => sqrt # sub => sub # var_mean => var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_per_fused_add_div_mul_sqrt_sub_var_mean_0 = async_compile.triton('triton_per_fused_add_div_mul_sqrt_sub_var_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_sqrt_sub_var_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp23 = tmp0 - tmp10 tmp24 = tmp23 / tmp21 tmp25 = tmp22 * tmp24 tmp27 = tmp25 + tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(out_ptr1 + (r1 + (64*x0)), tmp27, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, X, mul, add_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_div_mul_sqrt_sub_var_mean_0.run(buf3, primals_1, primals_2, primals_3, buf0, buf4, 4, 64, grid=grid(4), stream=stream0) del primals_2 del primals_3 return (buf4, primals_1, buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', 
dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_div_mul_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    r3 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp22 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 64.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-05
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.sqrt(tmp20)
    tmp23 = tmp0 - tmp10
    tmp24 = tmp23 / tmp21
    tmp25 = tmp22 * tmp24
    tmp27 = tmp25 + tmp26
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp21, xmask)
    tl.store(out_ptr1 + (r1 + 64 * x0), tmp27, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf1
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_div_mul_sqrt_sub_var_mean_0[grid(4)](buf3,
            primals_1, primals_2, primals_3, buf0, buf4, 4, 64, XBLOCK=1,
            num_warps=2, num_stages=1)
        del primals_2
        del primals_3
    return buf4, primals_1, buf0, buf3


class LN_selfNew(nn.Module):

    def __init__(self, num_features):
        super().__init__()
        shape = 1, num_features, 1, 1
        self.gamma = nn.Parameter(torch.ones(shape))
        self.beta = nn.Parameter(torch.zeros(shape))

    def forward(self, input_0):
        primals_2 = self.gamma
        primals_3 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
EkdeepSLubana/BeyondBatchNorm
LN_self
false
17240
[ "MIT" ]
10
2ab1626a1ebfdfe55f0a4bc6ac24c8bbdd4e0196
https://github.com/EkdeepSLubana/BeyondBatchNorm/tree/2ab1626a1ebfdfe55f0a4bc6ac24c8bbdd4e0196
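For orientation: the fused kernel in the LN_self pair above normalizes each sample over all of its 64 = 4 * 4 * 4 non-batch elements (mean and biased variance, eps = 1e-05) and then applies the per-channel affine parameters. Below is a minimal eager-mode sketch of the same computation, assuming the parameter shapes from LN_selfNew; the class name LN_selfEager and the parity check are illustrative, not part of the dataset.

import torch
import torch.nn as nn


class LN_selfEager(nn.Module):
    """Eager sketch of the fused add/div/mul/sqrt/sub/var_mean kernel above."""

    def __init__(self, num_features):
        super().__init__()
        shape = 1, num_features, 1, 1
        self.gamma = nn.Parameter(torch.ones(shape))
        self.beta = nn.Parameter(torch.zeros(shape))

    def forward(self, x):
        # Per-sample statistics over (C, H, W); the kernel divides by 64,
        # i.e. the biased (population) variance, hence unbiased=False.
        mean = x.mean(dim=(1, 2, 3), keepdim=True)
        var = x.var(dim=(1, 2, 3), keepdim=True, unbiased=False)
        return self.gamma * (x - mean) / torch.sqrt(var + 1e-05) + self.beta


# Parity check (illustrative): with x = torch.rand(4, 4, 4, 4, device='cuda'),
# LN_selfEager(4).cuda()(x) should match LN_selfNew(4).cuda()(x).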
TwoMLPHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/tf/ctfobpckmiv3kkga3a6gzs6unuclcnxpb4xc2h5r3udgxgix4ip5.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/wq/cwqkfc7efcgiuv6rsa3stkinyzeft7fq5wl4uyfa53emahjnunte.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), 
(1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf4, 16, grid=grid(16), stream=stream0) del primals_5 return (buf3, primals_1, buf1, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
            (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1,
            4), 0), out=buf2)
        buf3 = buf2
        del buf2
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(16)](buf3,
            primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
    return buf3, primals_1, buf1, buf4, primals_4


class TwoMLPHeadNew(nn.Module):
    """
    Heads for FPN for classification
    """

    def __init__(self, in_channels, representation_size):
        super(TwoMLPHeadNew, self).__init__()
        self.fc6 = nn.Linear(in_channels, representation_size)
        self.fc7 = nn.Linear(representation_size, representation_size)

    def forward(self, input_0):
        # primals_1 is the flattened input: `call` runs mm(primals_1,
        # primals_2^T) + primals_3 for fc6, then primals_4/primals_5 for fc7,
        # each followed by a fused bias + ReLU kernel.
        primals_1 = input_0
        primals_2 = self.fc6.weight
        primals_3 = self.fc6.bias
        primals_4 = self.fc7.weight
        primals_5 = self.fc7.bias
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
billchenxi/vision
TwoMLPHead
false
1548
[ "BSD-3-Clause" ]
0
5608b06a6fbc9c84284dad9f07a16df61da10f85
https://github.com/billchenxi/vision/tree/5608b06a6fbc9c84284dad9f07a16df61da10f85
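TwoMLPHeadNew above routes the input through two mm calls, each finished by a fused bias + ReLU kernel. A small parity sketch under the row's (4, 4) shapes, assuming TwoMLPHeadNew from the cell above is in scope; eager_head and the allclose tolerance are illustrative.

import torch
import torch.nn.functional as F


def eager_head(x, fc6, fc7):
    # Reference math for the two mm + fused bias/ReLU kernels above.
    x = F.relu(F.linear(x, fc6.weight, fc6.bias))
    return F.relu(F.linear(x, fc7.weight, fc7.bias))


if torch.cuda.is_available():
    m = TwoMLPHeadNew(4, 4).cuda()
    x = torch.rand(4, 4, device='cuda')
    assert torch.allclose(m(x), eager_head(x, m.fc6, m.fc7), atol=1e-06)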
ResidualBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/t6/ct6syu6rq3n7yx3zuog2yujcrfreefdccraqz7zj2m3c5xhvp5vl.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit] # Source node to ATen node mapping: # out_1 => convolution # out_2 => add, rsqrt, var_mean # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, 
RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp23, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/tv/ctvgdstglzbrzudd6bkrrqy4btcmsc7pfbqrlqw6ojd4t5dl7bkd.py # Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten._prelu_kernel, aten.reflection_pad2d] # Source node to ATen node mapping: # out_3 => gt, mul_1, where # out_4 => _unsafe_index_2, _unsafe_index_3 # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_1), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused__prelu_kernel_reflection_pad2d_2 = async_compile.triton('triton_poi_fused__prelu_kernel_reflection_pad2d_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_reflection_pad2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_reflection_pad2d_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 
6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + (0)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp9 = tmp8 * tmp4 tmp10 = tl.where(tmp6, tmp4, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/kw/ckw2ihgfnmmk4dhle4jn6wn67uv4od7ypr3h4aqj7caqddj4urj3.py # Topologically Sorted Source Nodes: [out_5, out_6, out_8], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten._prelu_kernel] # Source node to ATen node mapping: # out_5 => convolution_1 # out_6 => add_1, rsqrt_1, var_mean_1 # out_8 => gt_1, mul_3, where_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_3, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_6, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_6), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_6, %mul_3), kwargs = {}) triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_3 = async_compile.triton('triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_3', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_3(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (r2 + (16*x3)), xmask, other=0.0) tmp30 = tl.load(in_ptr2 + (0)) tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp2 - tmp12 tmp25 = tmp24 * tmp23 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = tmp27 > tmp28 tmp32 = tmp31 * tmp27 tmp33 = tl.where(tmp29, tmp27, tmp32) tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp23, xmask) tl.store(out_ptr1 + (r2 + (16*x3)), tmp33, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf6, primals_3, buf3, 16, 16, grid=grid(16), stream=stream0) del primals_3 buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted 
Source Nodes: [out_3, out_4], Original ATen: [aten._prelu_kernel, aten.reflection_pad2d] triton_poi_fused__prelu_kernel_reflection_pad2d_2.run(buf2, buf3, buf6, primals_4, buf7, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = buf8; del buf8 # reuse buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf13 = reinterpret_tensor(buf11, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf11 # reuse buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_5, out_6, out_8], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten._prelu_kernel] triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_3.run(buf9, buf13, primals_6, primals_1, primals_4, buf10, buf14, 16, 16, grid=grid(16), stream=stream0) del primals_6 return (buf14, primals_1, primals_2, primals_4, primals_5, buf0, buf2, buf3, buf6, buf7, buf9, buf10, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6 % 6
    x2 = xindex // 36
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
        xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
    in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp3, 0)
    tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp9 / tmp11
    tmp13 = tmp3 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp19 = 16.0
    tmp20 = tmp18 / tmp19
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x3, tmp23, xmask)
    tl.store(out_ptr0 + x3, tmp12, xmask)


@triton.jit
def triton_poi_fused__prelu_kernel_reflection_pad2d_2(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6 % 6
    x2 = xindex // 36
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
        xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr3 + 0)
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = tmp4 > tmp5
    tmp9 = tmp8 * tmp4
    tmp10 = tl.where(tmp6, tmp4, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_3(
    in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
    tmp30 = tl.load(in_ptr2 + 0)
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
    tmp2 = tmp0 + tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp3, 0)
    tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp9 / tmp11
    tmp13 = tmp3 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp19 = 16.0
    tmp20 = tmp18 / tmp19
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tmp24 = tmp2 - tmp12
    tmp25 = tmp24 * tmp23
    tmp27 = tmp25 + tmp26
    tmp28 = 0.0
    tmp29 = tmp27 > tmp28
    tmp32 = tmp31 * tmp27
    tmp33 = tl.where(tmp29, tmp27, tmp32)
    tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x3, tmp23, xmask)
    tl.store(out_ptr1 + (r2 + 16 * x3), tmp33, xmask)
    tl.store(out_ptr0 + x3, tmp12, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (1,), (1,))
    assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0,
            576, XBLOCK=256, num_warps=4, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0)
        del buf4
        triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](
            buf2, buf6, primals_3, buf3, 16, 16, XBLOCK=1, num_warps=2,
            num_stages=1)
        del primals_3
        buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
        triton_poi_fused__prelu_kernel_reflection_pad2d_2[grid(576)](buf2,
            buf3, buf6, primals_4, buf7, 576, XBLOCK=256, num_warps=4,
            num_stages=1)
        buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
        buf9 = buf8
        del buf8
        buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        buf13 = reinterpret_tensor(buf11, (1, 16, 1, 1), (16, 1, 1, 1), 0)
        del buf11
        buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_3[
            grid(16)](buf9, buf13, primals_6, primals_1, primals_4, buf10,
            buf14, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del primals_6
    return (buf14, primals_1, primals_2, primals_4, primals_5, buf0, buf2,
        buf3, buf6, buf7, buf9, buf10, buf13)


class ResidualBlockNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1,
        stride=1):
        super(ResidualBlockNew, self).__init__()
        self.padding1 = nn.ReflectionPad2d(padding)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=
            kernel_size, padding=0, stride=stride)
        self.bn1 = nn.InstanceNorm2d(out_channels)
        self.prelu = nn.PReLU()
        self.padding2 = nn.ReflectionPad2d(padding)
        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=
            kernel_size, padding=0, stride=stride)
        self.bn2 = nn.InstanceNorm2d(out_channels)

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.prelu.weight
        primals_5 = self.conv2.weight
        primals_6 = self.conv2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
qiyuqianxai/debvc
ResidualBlock
false
10784
[ "MIT" ]
0
1d919019a3191d1c6a7da9b8f16e47bca6b3aef9
https://github.com/qiyuqianxai/debvc/tree/1d919019a3191d1c6a7da9b8f16e47bca6b3aef9
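Reading the four ResidualBlock kernels above in order: reflection pad, conv1 bias plus instance-norm statistics (per-(N, C) mean and biased variance over the 16 spatial values, eps = 1e-05), PReLU fused with the second pad, then conv2 bias, the second instance norm, the residual add of the block input, and a final PReLU. A hedged eager sketch of that forward, reconstructed from the kernel math rather than copied from the source repo:

import torch


def residual_block_eager(block, x):
    # block: a ResidualBlockNew instance. bn1/bn2 are InstanceNorm2d without
    # affine, matching the per-(N, C) mean and rsqrt(var + 1e-05) in kernels
    # 1 and 3; the single PReLU weight is reused for both activations.
    out = block.prelu(block.bn1(block.conv1(block.padding1(x))))
    out = block.bn2(block.conv2(block.padding2(out)))
    return block.prelu(out + x)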
ConvLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/zo/czoefxnecwpok2nmimcnrjk6tumldvtx7xkzpmzgmnay3tkwhjwg.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) % 8 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-2) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-2) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7x/c7xksf46hekvm7qrrnxlo34nyt2yrkxxbfiyydgamwacppezvp4w.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 25) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 400, grid=grid(400), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8 % 8
    x2 = xindex // 64
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
        x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
        xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 25 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
            1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(400)](buf2, primals_3, 400,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


class ConvLayerNew(torch.nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvLayerNew, self).__init__()
        reflection_padding = kernel_size // 2
        self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
        self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
            kernel_size, stride)

    def forward(self, input_0):
        # primals_1 is the input that `call` reflection-pads; primals_2 is
        # the conv weight and primals_3 its per-channel bias.
        primals_1 = input_0
        primals_2 = self.conv2d.weight
        primals_3 = self.conv2d.bias
        output = call([primals_1, primals_2, primals_3])
        return output[0]
JEF1056/Reconstruction-Style
ConvLayer
false
17447
[ "MIT" ]
6
3430d9e9f05c6980ae251cf15b619148a2c899d6
https://github.com/JEF1056/Reconstruction-Style/tree/3430d9e9f05c6980ae251cf15b619148a2c899d6
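The ConvLayer pair above is reflection padding of kernel_size // 2 followed by a plain convolution, with the per-channel bias added by a separate pointwise kernel. An eager equivalent for comparison; the helper name is illustrative:

import torch


def conv_layer_eager(layer, x):
    # layer: a ConvLayerNew instance. Kernel 0 materializes the
    # reflection-padded input, extern_kernels.convolution does the conv,
    # and kernel 1 adds the per-channel bias in place.
    return layer.conv2d(layer.reflection_pad(x))


# With the row's shapes (illustrative): layer = ConvLayerNew(4, 4, 4, 1).cuda()
# and x = torch.rand(4, 4, 4, 4, device='cuda'); layer(x) should match
# conv_layer_eager(layer, x) up to floating-point tolerance.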
DenseCrossEntropy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py # Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # logprobs => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = 
tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nb/cnbdxy34iv6vkig4bfuqrxbegug3ek6lhyugevz3qctt7efdvtge.py # Topologically Sorted Source Nodes: [logprobs, neg, loss, loss_1, mean], Original ATen: [aten._log_softmax, aten.neg, aten.mul, aten.sum, aten.mean] # Source node to ATen node mapping: # logprobs => exp, log, sub_1, sum_1 # loss => mul # loss_1 => sum_2 # mean => mean # neg => neg # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg1_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {}) triton_per_fused__log_softmax_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: 
tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp2 - tmp11 tmp17 = -tmp16 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp21 = tmp5 - tmp11 tmp22 = -tmp21 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp8 - tmp11 tmp27 = -tmp26 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp35, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [logprobs, neg, loss, loss_1, mean], Original ATen: [aten._log_softmax, aten.neg, aten.mul, aten.sum, aten.mean] triton_per_fused__log_softmax_mean_mul_neg_sum_1.run(buf3, buf0, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp2 - tmp11 tmp17 = -tmp16 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp21 = tmp5 - tmp11 tmp22 = -tmp21 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp8 - tmp11 tmp27 = -tmp26 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3, buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf3, class DenseCrossEntropyNew(nn.Module): def 
forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
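A quick equivalence check for the compiled module above — a minimal sketch, not part of the source repo: the device, the variable names (logits, targets), and the soft-label construction are assumptions; the (4, 4, 4, 4) float32 shape is pinned by the assert_size_stride guards in call().

# Hedged smoke test: DenseCrossEntropyNew fuses log_softmax + soft-label
# cross-entropy, reduced to a scalar mean over the 64 leading positions.
import torch

logits = torch.rand(4, 4, 4, 4, device='cuda')
# Illustrative soft labels; any float32 tensor of the same shape is accepted.
targets = torch.softmax(torch.rand(4, 4, 4, 4, device='cuda'), dim=-1)

loss = DenseCrossEntropyNew()(logits, targets)

# Eager reference implementing the same graph fragment as the fused kernels.
ref = (-torch.log_softmax(logits, dim=-1) * targets).sum(dim=-1).mean()
assert torch.allclose(loss, ref, atol=1e-5)  # loose float32 tolerance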
i-pan/kaggle-melanoma
DenseCrossEntropy
false
15,569
[ "MIT" ]
68
caaec0d7e9cafc7b405eb86e7fdf00107d89e1d9
https://github.com/i-pan/kaggle-melanoma/tree/caaec0d7e9cafc7b405eb86e7fdf00107d89e1d9
MidNet4
import torch import torch.nn as nn class MidNet4(nn.Module): def forward(self, x_in): """Network with dilation rate 4 :param x_in: input convolutional features :returns: processed convolutional features :rtype: Tensor """ x = self.lrelu(self.conv1(x_in)) x = self.lrelu(self.conv2(x)) x = self.lrelu(self.conv3(x)) x = self.conv4(x) return x def __init__(self, in_channels=16): """Build the four 3x3 convolutions with dilation rate 4 :param in_channels: Input channels :returns: N/A :rtype: N/A """ super(MidNet4, self).__init__() self.lrelu = nn.LeakyReLU() self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 4, 4) self.conv2 = nn.Conv2d(64, 64, 3, 1, 4, 4) self.conv3 = nn.Conv2d(64, 64, 3, 1, 4, 4) self.conv4 = nn.Conv2d(64, 64, 3, 1, 4, 4) def get_inputs(): return [torch.rand([4, 16, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf0, primals_2, buf1, buf2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) buf5 = buf0 del buf0 triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf3, primals_5, buf4, buf5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) buf8 = buf3 del buf3 triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf6, primals_7, buf7, buf8, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf6 del 
primals_7 buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf10 = buf9 del buf9 triton_poi_fused_convolution_1[grid(1048576)](buf10, primals_9, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf4, buf5, buf7, buf8) class MidNet4New(nn.Module): def __init__(self, in_channels=16): """Build the four 3x3 convolutions with dilation rate 4 :param in_channels: Input channels :returns: N/A :rtype: N/A """ super(MidNet4New, self).__init__() self.lrelu = nn.LeakyReLU() self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 4, 4) self.conv2 = nn.Conv2d(64, 64, 3, 1, 4, 4) self.conv3 = nn.Conv2d(64, 64, 3, 1, 4, 4) self.conv4 = nn.Conv2d(64, 64, 3, 1, 4, 4) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
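A minimal usage sketch for the compiled variant (assumptions: a CUDA build of PyTorch and the exact (4, 16, 64, 64) input shape that call() pins via assert_size_stride; the variable names are illustrative):

# Hedged sketch: MidNet4New is intended as a drop-in for MidNet4 on cuda:0.
import torch

net = MidNet4New().cuda()     # parameters must live on the GPU for call()
x = torch.rand(4, 16, 64, 64, device='cuda')
features = net(x)             # four 3x3 convs, dilation 4, padding 4 -> H, W preserved
print(features.shape)         # torch.Size([4, 64, 64, 64])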
deshwalmahesh/CURL---cpu-gpu
MidNet4
false
3,415
[ "BSD-3-Clause" ]
0
f4e87275b6cce556b9e04a188cf7ae13d810d82a
https://github.com/deshwalmahesh/CURL---cpu-gpu/tree/f4e87275b6cce556b9e04a188cf7ae13d810d82a
GateContextSelectionLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/2f/c2fcasureqlcqroneun3t4p4fi3ty77g34yxduqsb7qua2qbyhta.py # Topologically Sorted Source Nodes: [add, update, sub, mul, mul_1, out], Original ATen: [aten.add, aten.sigmoid, aten.rsub, aten.mul, aten.sigmoid_backward] # Source node to ATen node mapping: # add => add # mul => mul # mul_1 => mul_1 # out => add_1 # sub => sub # update => sigmoid # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {}) # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_6), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_sigmoid_backward_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_sigmoid_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_add_mul_rsub_sigmoid_sigmoid_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2), xmask) tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x2), xmask) tmp12 = tl.load(in_ptr5 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp11 = tmp9 * tmp10 tmp13 = tmp7 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = tmp7 * tmp9 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, update, sub, mul, mul_1, out], Original ATen: [aten.add, aten.sigmoid, aten.rsub, aten.mul, aten.sigmoid_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_sigmoid_backward_0.run(buf0, primals_2, buf1, primals_5, primals_3, primals_6, buf2, buf3, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_2 del primals_5 return (buf2, primals_3, primals_6, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = 
rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x2, xmask) tmp12 = tl.load(in_ptr5 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp11 = tmp9 * tmp10 tmp13 = tmp7 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = tmp7 * tmp9 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_sigmoid_backward_0[grid(256)]( buf0, primals_2, buf1, primals_5, primals_3, primals_6, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_5 return buf2, primals_3, primals_6, buf3 class GateContextSelectionLayerNew(nn.Module): def __init__(self, dim_model, dim_ff, prob_dropout): super(GateContextSelectionLayerNew, self).__init__() self.source = nn.Linear(dim_model, dim_model) self.context = nn.Linear(dim_model, dim_model) def forward(self, input_0, input_1): primals_1 = self.source.weight primals_2 = self.source.bias primals_4 = self.context.weight primals_5 = self.context.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
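A usage sketch for the gate above — a hedged example, not from the source repo: the dim_ff and prob_dropout values are placeholders (the compiled __init__ ignores both), and the (4, 4, 4, 4) shapes come from the guards in call(). The fused kernel computes g = sigmoid(source(s) + context(c)) and returns (1 - g) * s + g * c.

# Hedged sketch: sigmoid-gated blend of a source stream and a context stream.
import torch

layer = GateContextSelectionLayerNew(dim_model=4, dim_ff=16, prob_dropout=0.1).cuda()
s = torch.rand(4, 4, 4, 4, device='cuda')  # source stream (input_0)
c = torch.rand(4, 4, 4, 4, device='cuda')  # context stream (input_1)
out = layer(s, c)                          # same shape as the inputs
print(out.shape)                           # torch.Size([4, 4, 4, 4])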
KirkGuo/HCN
GateContextSelectionLayer
false
5,437
[ "MIT" ]
1
7d8020c8d76413b6ca3a359fb2e9b34652949e17
https://github.com/KirkGuo/HCN/tree/7d8020c8d76413b6ca3a359fb2e9b34652949e17
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [state_action], Original ATen: [aten.cat] # Source node to ATen node mapping: # state_action => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ac/cacdwifxdru2eihx3n66wqfym5hjpdo6yxk3gsol5t54xroplkwv.py # Topologically Sorted Source Nodes: [q], Original ATen: [aten.relu] # Source node to ATen node mapping: # q => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/vp/cvpyw2bjgo55x2ne47dmpi2vmqu5t4eb3wcvjgjcnove3xyj7bcr.py # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # q_1 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400, ), (1, )) assert_size_stride(primals_5, (300, 400), (400, 1)) assert_size_stride(primals_6, (300, ), (1, )) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [state_action], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [q], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 1600, grid=grid(1600), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf4, primals_6, 1200, grid=grid(1200), stream=stream0) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [q_2], Original 
ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_8 return (buf6, buf0, buf2, buf4, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400,), (1,)) assert_size_stride(primals_5, (300, 400), (400, 1)) assert_size_stride(primals_6, (300,), (1,)) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(1600)](buf2, primals_4, 1600, XBLOCK= 256, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), ( 1, 400), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_2[grid(1200)](buf4, primals_6, 1200, XBLOCK= 256, num_warps=4, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) 
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_8 return buf6, buf0, buf2, buf4, primals_7, primals_5 class CriticNew(nn.Module): def __init__(self, state_dim, action_dim): super(CriticNew, self).__init__() self.fc1 = nn.Linear(state_dim + action_dim, 400) self.fc2 = nn.Linear(400, 300) self.fc3 = nn.Linear(300, 1) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
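A smoke-test sketch for the compiled critic (assumptions: a CUDA device and a batch of 4 with state_dim = action_dim = 4, matching the rand_strided shapes in the benchmark harness above):

# Hedged sketch: CriticNew concatenates (state, action) and regresses a scalar Q.
import torch

critic = CriticNew(state_dim=4, action_dim=4).cuda()
state = torch.rand(4, 4, device='cuda')
action = torch.rand(4, 4, device='cuda')
q = critic(state, action)   # cat -> Linear(8, 400) -> ReLU -> Linear(400, 300) -> ReLU -> Linear(300, 1)
print(q.shape)              # torch.Size([4, 1])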
Chris0919/Deep-reinforcement-learning-with-pytorch
Critic
false
4,999
[ "MIT" ]
1
a4f458dde7659654fcae4635d25f6bd05a5d2d6c
https://github.com/Chris0919/Deep-reinforcement-learning-with-pytorch/tree/a4f458dde7659654fcae4635d25f6bd05a5d2d6c
ScaledDotProductAttentionMemory
import torch import numpy as np from torch import nn class ScaledDotProductAttentionMemory(nn.Module): """ Scaled dot-product attention with memory """ def __init__(self, d_model, d_k, d_v, h, m): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads :param m: Number of memory slots """ super(ScaledDotProductAttentionMemory, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.m_k = nn.Parameter(torch.FloatTensor(1, m, h * d_k)) self.m_v = nn.Parameter(torch.FloatTensor(1, m, h * d_v)) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.m = m self.init_weights() def init_weights(self): nn.init.xavier_uniform_(self.fc_q.weight) nn.init.xavier_uniform_(self.fc_k.weight) nn.init.xavier_uniform_(self.fc_v.weight) nn.init.xavier_uniform_(self.fc_o.weight) nn.init.normal_(self.m_k, 0, 1 / self.d_k) nn.init.normal_(self.m_v, 0, 1 / self.m) nn.init.constant_(self.fc_q.bias, 0) nn.init.constant_(self.fc_k.bias, 0) nn.init.constant_(self.fc_v.bias, 0) nn.init.constant_(self.fc_o.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] m_k = np.sqrt(self.d_k) * self.m_k.expand(b_s, self.m, self.h * self.d_k) m_v = np.sqrt(self.m) * self.m_v.expand(b_s, self.m, self.h * self.d_v) q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = torch.cat([self.fc_k(keys), m_k], 1).view(b_s, nk + self.m, self.h, self.d_k).permute(0, 2, 3, 1) v = torch.cat([self.fc_v(values), m_v], 1).view(b_s, nk + self.m, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = torch.cat([att[:, :, :, :nk] * attention_weights, att[:, :, :, nk:]], -1) if attention_mask is not None: att[:, :, :, :nk] = att[:, :, :, :nk].masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4, 'm': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 16 x2 = xindex // 128 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + 16 * x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = 2.0 tmp11 = tmp10 * tmp9 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_per_fused__softmax_sqrt_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 2.0, tl.float64) tmp2 = tl.full([1, 1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, float('-inf')) tmp11 = triton_helpers.max2(tmp10, 1)[:, None] tmp12 = tmp7 - tmp11 tmp13 = tmp6.to(tl.float64) tmp14 = tmp13 * tmp1 tmp15 = tmp14.to(tl.float32) tmp16 = tmp12 / tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp17 / tmp21 tl.store(out_ptr2 + (r1 + 8 * x0), tmp22, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 % 4 x3 = xindex // 128 x4 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * (-4 + x1)), tmp6 
& xmask, eviction_policy='evict_last', other=0.0) tmp10 = 2.0 tmp11 = tmp10 * tmp9 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 16), (64, 16, 1)) assert_size_stride(primals_4, (1, 4, 16), (64, 16, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16,), (1,)) assert_size_stride(primals_11, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf0) del primals_5 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_7 del primals_8 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(primals_11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_10 del primals_9 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf0, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) triton_poi_fused_clone_1[grid(512)](buf1, primals_3, buf4, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf5 = empty_strided_cuda((16, 4, 8), (32, 8, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 8), (32, 8, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) triton_per_fused__softmax_sqrt_2[grid(64)](buf5, buf8, 64, 8, XBLOCK=32, num_warps=2, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 8, 4), (128, 32, 4, 1), 0) del buf5 triton_poi_fused_clone_3[grid(512)](buf2, primals_4, buf9, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf10 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 8), (32, 8, 1), 0), reinterpret_tensor(buf9, (16, 8, 4), (32, 4, 1), 0), out=buf10) buf11 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_clone_4[grid(256)](buf10, buf11, 256, XBLOCK=256, num_warps=4, 
num_stages=1) del buf10 buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf11, (16, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf12) del primals_13 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_11, (16, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf11, (16, 16), (16, 1), 0 ), primals_12, reinterpret_tensor(buf9, (16, 4, 8), (32, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 8, 4), (32, 1, 8), 0) class ScaledDotProductAttentionMemoryNew(nn.Module): """ Scaled dot-product attention with memory """ def __init__(self, d_model, d_k, d_v, h, m): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads :param m: Number of memory slots """ super(ScaledDotProductAttentionMemoryNew, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.m_k = nn.Parameter(torch.FloatTensor(1, m, h * d_k)) self.m_v = nn.Parameter(torch.FloatTensor(1, m, h * d_v)) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.m = m self.init_weights() def init_weights(self): nn.init.xavier_uniform_(self.fc_q.weight) nn.init.xavier_uniform_(self.fc_k.weight) nn.init.xavier_uniform_(self.fc_v.weight) nn.init.xavier_uniform_(self.fc_o.weight) nn.init.normal_(self.m_k, 0, 1 / self.d_k) nn.init.normal_(self.m_v, 0, 1 / self.m) nn.init.constant_(self.fc_q.bias, 0) nn.init.constant_(self.fc_k.bias, 0) nn.init.constant_(self.fc_v.bias, 0) nn.init.constant_(self.fc_o.bias, 0) def forward(self, input_0, input_1, input_2): primals_3 = self.m_k primals_4 = self.m_v primals_5 = self.fc_q.weight primals_6 = self.fc_q.bias primals_7 = self.fc_k.weight primals_8 = self.fc_k.bias primals_9 = self.fc_v.weight primals_10 = self.fc_v.bias primals_12 = self.fc_o.weight primals_13 = self.fc_o.bias primals_1 = input_0 primals_2 = input_1 primals_11 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
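A usage sketch matching get_init_inputs() from the eager module above — hedged: the device and tensor names are assumptions, and note that the compiled forward takes only (queries, keys, values); the attention_mask / attention_weights arguments of the eager version are not supported by this traced path.

# Hedged sketch: attention over nk + m = 8 positions (4 keys + 4 memory slots).
import torch

attn = ScaledDotProductAttentionMemoryNew(d_model=4, d_k=4, d_v=4, h=4, m=4).cuda()
q = torch.rand(4, 4, 4, device='cuda')  # (b_s, nq, d_model)
k = torch.rand(4, 4, 4, device='cuda')  # (b_s, nk, d_model)
v = torch.rand(4, 4, 4, device='cuda')  # (b_s, nk, d_model)
out = attn(q, k, v)
print(out.shape)                        # torch.Size([4, 4, 4])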
Kilichbek/artemis-m2-transformer
ScaledDotProductAttentionMemory
false
17,550
[ "MIT" ]
8
99f7e797965710bf2565283d6b5028a6fe32664c
https://github.com/Kilichbek/artemis-m2-transformer/tree/99f7e797965710bf2565283d6b5028a6fe32664c
DenseGraphConv
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_6/inductor_cache/3i/c3iibarqokkpdvrjfwijmnifcbse5v3c5vj2oqjzbj6rok54jxf5.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   out => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/w2/cw2bwqpq3dkexeyqz25khcvdcedkdcrcwpb7zrtd6eayijd5lgez.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   out_2 => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %view_6), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x2), xmask)
    tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clone_0.run(primals_2, buf0, 256, grid=grid(256), stream=stream0)
        del primals_2
        buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1)
        buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2)
        del primals_3
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
        del primals_4
        buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2  # reuse
        # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add]
        triton_poi_fused_add_1.run(buf4, buf3, primals_5, 256, grid=grid(256), stream=stream0)
        del buf3
        del primals_5
    return (buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
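Read together, the expand/clone, the two extern matmuls, and the fused add in `call` above implement the dense GraphConv algebra: aggregate neighbours with the adjacency matrix, transform with `weight`, then add a linear skip connection on the raw features. A minimal eager-mode sketch of the same computation, using illustrative names and the shapes asserted in `call` (this is an editor's orientation aid, not part of the generated module):

import torch
import torch.nn.functional as F

# Shapes mirror the assert_size_stride guards in call(); values are arbitrary.
x = torch.randn(4, 4, 4, 4)   # primals_1: node features
adj = torch.randn(4, 4, 4)    # primals_2: dense adjacency, broadcast over the leading dim
weight = torch.randn(4, 4)    # primals_3: GraphConv weight
lin_w = torch.randn(4, 4)     # primals_4: skip-connection Linear weight
lin_b = torch.randn(4)        # primals_5: skip-connection Linear bias

out = torch.matmul(adj, x)               # expand (triton_poi_fused_clone_0) + extern bmm
out = torch.matmul(out, weight)          # extern mm with primals_3
out = out + F.linear(x, lin_w, lin_b)    # extern mm + fused bias add (triton_poi_fused_add_1)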
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch.nn import Parameter
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(256)](primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1)
        buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
        del buf0
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2)
        del primals_3
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
        del primals_4
        buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf3
        del primals_5
    return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0)


def uniform(size, tensor):
    bound = 1.0 / math.sqrt(size)
    if tensor is not None:
        tensor.data.uniform_(-bound, bound)


class DenseGraphConvNew(torch.nn.Module):
    """See :class:`torch_geometric.nn.conv.GraphConv`."""

    def __init__(self, in_channels, out_channels, aggr='add', bias=True):
        assert aggr in ['add', 'mean', 'max']
        super(DenseGraphConvNew, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aggr = aggr
        self.weight = Parameter(torch.Tensor(in_channels, out_channels))
        self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        uniform(self.in_channels, self.weight)
        self.lin.reset_parameters()

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)

    def forward(self, input_0, input_1):
        primals_3 = self.weight
        primals_4 = self.lin.weight
        primals_5 = self.lin.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
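Unlike other entries in this dataset, this one does not ship `get_inputs`/`get_init_inputs` helpers. A minimal smoke-test sketch in that same convention, assuming a CUDA device is available (names and values here are illustrative, not from the source repo):

import torch

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]

def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]

# Shapes match the guards in call(): features first, dense adjacency second.
model = DenseGraphConvNew(in_channels=4, out_channels=4).cuda()
x, adj = (t.cuda() for t in get_inputs())
out = model(x, adj)
assert out.shape == (4, 4, 4, 4)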
cshjin/pytorch_geometric
DenseGraphConv
false
1763
[ "MIT" ]
0
8dd0e76beb72135949a275edd851f80f7b97648f
https://github.com/cshjin/pytorch_geometric/tree/8dd0e76beb72135949a275edd851f80f7b97648f
Unet
import torch
import torch.nn as nn


def crop(image, new_shape):
    plus_h, plus_w = 0, 0
    if new_shape[2] % 2 != 0:
        plus_h = 1
    if new_shape[3] % 2 != 0:
        plus_w = 1
    middle_height = image.shape[2] // 2
    middle_weight = image.shape[3] // 2
    go_height = new_shape[2] // 2
    go_weight = new_shape[3] // 2
    cropped_image = image[:, :, middle_height - go_height:middle_height + go_height + plus_h, middle_weight - go_weight:middle_weight + go_weight + plus_w]
    return cropped_image


class ContractingBlock(nn.Module):

    def __init__(self, input_channel):
        super(ContractingBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=input_channel, out_channels=input_channel * 2, kernel_size=(3, 3))
        self.conv2 = nn.Conv2d(input_channel * 2, input_channel * 2, kernel_size=(3, 3))
        self.maxpool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.activation = nn.ReLU()

    def forward(self, x):
        x = self.activation(self.conv1(x))
        x = self.activation(self.conv2(x))
        return self.maxpool(x)


class ExpandingBlock(nn.Module):

    def __init__(self, input_channels):
        super(ExpandingBlock, self).__init__()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv1 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=(2, 2))
        self.conv2 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=(3, 3))
        self.conv3 = nn.Conv2d(input_channels // 2, input_channels // 2, kernel_size=(3, 3))
        self.activation = nn.ReLU()

    def forward(self, skip_con_x, x):
        x = self.upsample(x)
        x = self.conv1(x)
        skip_con_x = crop(skip_con_x, x.shape)
        x = torch.cat([skip_con_x, x], axis=1)
        x = self.conv2(x)
        x = self.activation(x)
        x = self.conv3(x)
        return self.activation(x)


class FeatureMapBlock(nn.Module):

    def __init__(self, input_channels, output_channels):
        super(FeatureMapBlock, self).__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=(1, 1))

    def forward(self, x):
        return self.conv(x)


class Unet(nn.Module):

    def __init__(self, input_channels, output_channels, hidden_channels=64):
        super(Unet, self).__init__()
        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
        self.contract1 = ContractingBlock(hidden_channels)
        self.contract2 = ContractingBlock(hidden_channels * 2)
        self.contract3 = ContractingBlock(hidden_channels * 4)
        self.contract4 = ContractingBlock(hidden_channels * 8)
        self.expanding1 = ExpandingBlock(hidden_channels * 16)
        self.expanding2 = ExpandingBlock(hidden_channels * 8)
        self.expanding3 = ExpandingBlock(hidden_channels * 4)
        self.expanding4 = ExpandingBlock(hidden_channels * 2)
        self.downfeature = FeatureMapBlock(hidden_channels, output_channels)

    def forward(self, x):
        x0 = self.upfeature(x)
        x1 = self.contract1(x0)
        x2 = self.contract2(x1)
        x3 = self.contract3(x2)
        x4 = self.contract4(x3)
        x5 = self.expanding1(skip_con_x=x3, x=x4)
        x6 = self.expanding2(skip_con_x=x2, x=x5)
        x7 = self.expanding3(skip_con_x=x1, x=x6)
        x8 = self.expanding4(skip_con_x=x0, x=x7)
        return self.downfeature(x8)


def get_inputs():
    return [torch.rand([4, 4, 256, 256])]


def get_init_inputs():
    return [[], {'input_channels': 4, 'output_channels': 4}]
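The `crop` helper above center-crops each stored skip connection down to the spatial size of the upsampled decoder tensor before concatenation; in the compiled code that follows, this crop survives only as constant offsets baked into the `fused_cat` kernels' load indices (for example the `87 + x0 + 28 * x1` term, which is a 3-pixel top-left offset cropping a 28x28 map to 23x23). A small standalone check of the crop behaviour, with made-up shapes (a sketch; `crop` is the function defined above):

import torch

# Odd target sizes exercise the plus_h/plus_w branches.
skip = torch.arange(36, dtype=torch.float32).reshape(1, 1, 6, 6)
cropped = crop(skip, (1, 1, 3, 3))
assert cropped.shape == (1, 1, 3, 3)
# middle = 3, go = 1, plus = 1, so rows/cols 2..4 of the 6x6 map are kept.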
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 65536 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64516 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 63504 % 128 x0 = xindex % 63504 x4 = xindex // 63504 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 63520 * x4), tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 126 x1 = xindex // 126 % 126 x2 = xindex // 15876 x3 = xindex % 15876 tmp0 = tl.load(in_ptr0 + (2 * x0 + 504 * x1 + 63520 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 504 * x1 + 63520 * x2), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (252 + 2 * x0 + 504 * x1 + 63520 * x2), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (253 + 2 * x0 + 504 * x1 + 63520 * x2), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + 15904 * x2), tmp6, None) tl.store(out_ptr1 + (x3 + 16000 * x2), tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 15376 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 14884 % 256 x0 = xindex % 14884 x4 = xindex // 14884 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 14912 * x4), tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 3810304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 61 x1 = xindex // 61 % 61 x2 = xindex // 3721 x3 = xindex % 3721 tmp0 = tl.load(in_ptr0 + (2 * x0 + 244 * x1 + 14912 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 244 * x1 + 14912 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (122 + 2 * x0 + 244 * x1 + 14912 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (123 + 2 * x0 + 244 * x1 + 14912 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + 3744 * x2), tmp6, xmask) tl.store(out_ptr1 + (x3 + 3840 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 3481 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 3249 % 512 x0 = xindex % 3249 x4 = xindex // 3249 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3264 * x4), tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 28 x1 = xindex // 28 % 28 x2 = xindex // 784 x3 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 114 * x1 + 3264 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 114 * x1 + 3264 * x2), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (57 + 2 * x0 + 114 * x1 + 3264 * x2), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (58 + 2 * x0 + 114 * x1 + 3264 
* x2), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 676 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 576 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) @triton.jit def triton_poi_fused__to_copy_13(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4782608695652174 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4782608695652174 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 11, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) 
@triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4782608695652174 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_max_pool2d_with_indices_mul_sub_16( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 24 % 24 x0 = xindex % 24 x2 = xindex // 576 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp53 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 12, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (2 * tmp8 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 2 * tmp8 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tl.load(in_ptr2 + (24 + 2 * tmp8 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr2 + (25 + 2 * tmp8 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (2 * tmp8 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (1 + 2 * tmp8 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp23 = tl.load(in_ptr2 + (24 + 2 * tmp8 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp25 = tl.load(in_ptr2 + (25 + 2 * tmp8 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = tmp27 + tmp1 tmp29 = tmp27 < 0 tmp30 = tl.where(tmp29, tmp28, tmp27) tmp31 = tl.load(in_ptr2 + (2 * tmp30 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (1 + 2 * tmp30 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp33 = triton_helpers.maximum(tmp32, tmp31) tmp34 = tl.load(in_ptr2 + (24 + 2 * tmp30 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp35 = triton_helpers.maximum(tmp34, tmp33) tmp36 = tl.load(in_ptr2 + (25 + 2 * tmp30 + 48 * tmp19 + 576 * x2), None, eviction_policy='evict_last') tmp37 = triton_helpers.maximum(tmp36, tmp35) tmp38 = tmp37 - tmp26 tmp40 = tmp38 * tmp39 tmp41 = tmp26 + tmp40 tmp42 = tl.load(in_ptr2 + (2 * tmp30 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr2 + (1 + 2 * tmp30 + 48 * tmp4 + 576 * x2), None, 
eviction_policy='evict_last') tmp44 = triton_helpers.maximum(tmp43, tmp42) tmp45 = tl.load(in_ptr2 + (24 + 2 * tmp30 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp46 = triton_helpers.maximum(tmp45, tmp44) tmp47 = tl.load(in_ptr2 + (25 + 2 * tmp30 + 48 * tmp4 + 576 * x2), None, eviction_policy='evict_last') tmp48 = triton_helpers.maximum(tmp47, tmp46) tmp49 = tmp48 - tmp15 tmp50 = tmp49 * tmp39 tmp51 = tmp15 + tmp50 tmp52 = tmp51 - tmp41 tmp54 = tmp52 * tmp53 tmp55 = tmp41 + tmp54 tl.store(in_out_ptr1 + x4, tmp55, None) @triton.jit def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 529 % 1024 x0 = xindex % 23 x1 = xindex // 23 % 23 x3 = xindex // 541696 x4 = xindex % 529 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (87 + x0 + 28 * x1 + 784 * x2 + 401408 * x3), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 1024, tl.int64) tmp9 = tl.load(in_ptr1 + (x4 + 529 * (-512 + x2) + 270848 * x3), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-512 + x2), tmp6, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x5, tmp14, None) @triton.jit def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 441 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__to_copy_19(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 38 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4864864864864865 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_20(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 38 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4864864864864865 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 18, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_21(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 38 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.4864864864864865 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def 
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 38 % 38 x0 = xindex % 38 x5 = xindex // 1444 x2 = xindex // 1444 % 512 xindex % 1444 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 19, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 19 * tmp4 + 361 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + 19 * tmp4 + 361 * x5), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + 19 * tmp28 + 361 * x5), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + 19 * tmp28 + 361 * x5), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + x6, tmp41, None) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 1369 % 512 x0 = xindex % 37 x1 = xindex // 37 % 37 x3 = xindex // 700928 x4 = xindex % 1369 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (744 + x0 + 61 * x1 + 3744 * x2 + 958464 * x3), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp9 = tl.load(in_ptr1 + (x4 + 1369 * (-256 + x2) + 350464 * x3), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-256 + x2), tmp6, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x5, tmp14, None) @triton.jit def triton_poi_fused_convolution_relu_24(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1254400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1225 % 256 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def 
triton_poi_fused__to_copy_25(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 66 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49230769230769234 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_26(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 66 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49230769230769234 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 32, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_27(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 66 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49230769230769234 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_28(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 66 % 66 x0 = xindex % 66 x5 = xindex // 4356 x2 = xindex // 4356 % 256 xindex % 4356 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 33, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 33 * tmp4 + 1089 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + 33 * tmp4 + 1089 * x5), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + 33 * tmp28 + 1089 * x5), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + 33 * tmp28 + 1089 * x5), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 
tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + x6, tmp41, None) @triton.jit def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4326400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4225 % 256 x0 = xindex % 65 x1 = xindex // 65 % 65 x3 = xindex // 1081600 x4 = xindex % 4225 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (3937 + x0 + 126 * x1 + 15904 * x2 + 2035712 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp9 = tl.load(in_ptr1 + (x4 + 4225 * (-128 + x2) + 540800 * x3), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + (-128 + x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x5, tmp14, xmask) @triton.jit def triton_poi_fused_convolution_relu_30(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2032128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3969 % 128 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused__to_copy_31(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 122 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49586776859504134 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_32(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 122 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49586776859504134 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 60, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_33(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 122 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.49586776859504134 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_34(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 122 % 122 x0 = xindex % 122 x5 = xindex // 14884 x2 = xindex // 14884 % 128 xindex % 14884 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 61, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 61 * tmp4 + 3721 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (tmp17 + 61 * tmp4 + 3721 * x5), None, eviction_policy='evict_last') tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp8 + 61 * tmp28 + 3721 * x5), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (tmp17 + 61 * tmp28 + 3721 * x5), None, eviction_policy='evict_last') tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + x6, tmp41, None) @triton.jit def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 7496192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 14641 % 128 x0 = xindex % 121 x1 = xindex // 121 % 121 x3 = xindex // 1874048 x4 = xindex % 14641 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (17476 + x0 + 256 * x1 + 65536 * x2 + 4194304 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp9 = tl.load(in_ptr1 + (x4 + 14641 * (-64 + x2) + 937024 * x3), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + (-64 + x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x5, tmp14, xmask) @triton.jit def triton_poi_fused_convolution_relu_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3625216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 14161 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_37(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3504384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 13689 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) 
tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_38(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 219024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 13689 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_39(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1905152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3721 % 128 x0 = xindex % 3721 x4 = xindex // 3721 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + 3840 * x4), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1089 % 256 x0 = xindex % 1089 x4 = xindex // 1089 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x0 + 1152 * x4), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_41(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 361 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45) = args args.clear() assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 256, 256), (262144, 65536, 256, 1)) assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (512,), (1,)) 
assert_size_stride(primals_14, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_17, (1024,), (1,)) assert_size_stride(primals_18, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_19, (1024,), (1,)) assert_size_stride(primals_20, (512, 1024, 2, 2), (4096, 4, 2, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (256, 512, 2, 2), (2048, 4, 2, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (256,), (1,)) assert_size_stride(primals_30, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_31, (256,), (1,)) assert_size_stride(primals_32, (128, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_33, (128,), (1,)) assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (128,), (1,)) assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (64, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_39, (64,), (1,)) assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_41, (64,), (1,)) assert_size_stride(primals_42, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_43, (64,), (1,)) assert_size_stride(primals_44, (4, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_45, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 256, 256), (4194304, 65536, 256, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16777216)](buf1, primals_2, 16777216, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 254, 254), (8258048, 64516, 254, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(33032192)](buf3, primals_5, 33032192, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 252, 252), (8128512, 63504, 252, 1)) buf5 = empty_strided_cuda((4, 128, 252, 252), (8130560, 63520, 252, 1), torch.float32) triton_poi_fused_convolution_relu_2[grid(32514048)](buf4, primals_7, buf5, 32514048, XBLOCK=512, num_warps=8, num_stages=1) del buf4 del primals_7 buf6 = empty_strided_cuda((4, 128, 126, 126), (2035712, 15904, 126, 1), torch.float32) buf7 = empty_strided_cuda((4, 128, 126, 126), (2048000, 16000, 126, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(8128512)](buf5, buf6, buf7, 8128512, XBLOCK=1024, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
        assert_size_stride(buf8, (4, 256, 124, 124), (3936256, 15376, 124, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_relu_4[grid(15745024)](buf9, primals_9,
            15745024, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_9
        buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 256, 122, 122), (3810304, 14884, 122, 1))
        buf11 = empty_strided_cuda((4, 256, 122, 122), (3817472, 14912,
            122, 1), torch.float32)
        triton_poi_fused_convolution_relu_5[grid(15241216)](buf10,
            primals_11, buf11, 15241216, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf10
        del primals_11
        buf12 = empty_strided_cuda((4, 256, 61, 61), (958464, 3744, 61, 1),
            torch.float32)
        buf13 = empty_strided_cuda((4, 256, 61, 61), (983040, 3840, 61, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_6[grid(3810304)](buf11,
            buf12, buf13, 3810304, XBLOCK=512, num_warps=8, num_stages=1)
        buf14 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 512, 59, 59), (1782272, 3481, 59, 1))
        buf15 = buf14
        del buf14
        triton_poi_fused_convolution_relu_7[grid(7129088)](buf15,
            primals_13, 7129088, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_13
        buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 512, 57, 57), (1663488, 3249, 57, 1))
        buf17 = empty_strided_cuda((4, 512, 57, 57), (1671168, 3264, 57, 1),
            torch.float32)
        triton_poi_fused_convolution_relu_8[grid(6653952)](buf16,
            primals_15, buf17, 6653952, XBLOCK=512, num_warps=8, num_stages=1)
        del buf16
        del primals_15
        buf18 = empty_strided_cuda((4, 512, 28, 28), (401408, 784, 28, 1),
            torch.float32)
        buf19 = empty_strided_cuda((4, 512, 28, 28), (401408, 784, 28, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_9[grid(1605632)](buf17,
            buf18, buf19, 1605632, XBLOCK=512, num_warps=8, num_stages=1)
        buf20 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 1024, 26, 26), (692224, 676, 26, 1))
        buf21 = buf20
        del buf20
        triton_poi_fused_convolution_relu_10[grid(2768896)](buf21,
            primals_17, 2768896, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_17
        buf22 = extern_kernels.convolution(buf21, primals_18, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf22, (4, 1024, 24, 24), (589824, 576, 24, 1))
        buf23 = buf22
        del buf22
        triton_poi_fused_convolution_relu_11[grid(2359296)](buf23,
            primals_19, 2359296, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_19
        buf24 = empty_strided_cuda((4, 1024, 12, 12), (147456, 144, 12, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_12[grid(589824)](buf23,
            buf24, 589824, XBLOCK=512, num_warps=8, num_stages=1)
        buf25 = empty_strided_cuda((24, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_13[grid(24)](buf25, 24, XBLOCK=32,
            num_warps=1, num_stages=1)
        buf26 = empty_strided_cuda((24, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_14[grid(24)](buf26, 24, XBLOCK=32,
            num_warps=1, num_stages=1)
        buf27 = empty_strided_cuda((24,), (1,), torch.int64)
        triton_poi_fused__to_copy_13[grid(24)](buf27, 24, XBLOCK=32,
            num_warps=1, num_stages=1)
        buf28 = empty_strided_cuda((24,), (1,), torch.int64)
        triton_poi_fused_add_clamp_14[grid(24)](buf28, 24, XBLOCK=32,
            num_warps=1, num_stages=1)
        buf31 = empty_strided_cuda((24,), (1,), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_15[grid(24)](buf31,
            24, XBLOCK=32, num_warps=1, num_stages=1)
        buf33 = empty_strided_cuda((24, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_15[grid(24)](buf33,
            24, XBLOCK=32, num_warps=1, num_stages=1)
        buf30 = empty_strided_cuda((4, 1024, 24, 24), (589824, 576, 24, 1),
            torch.float32)
        buf34 = buf30
        del buf30
        buf35 = buf34
        del buf34
        triton_poi_fused__unsafe_index_add_max_pool2d_with_indices_mul_sub_16[
            grid(2359296)](buf35, buf26, buf27, buf23, buf25, buf28, buf31,
            buf33, 2359296, XBLOCK=512, num_warps=8, num_stages=1)
        buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf36, (4, 512, 23, 23), (270848, 529, 23, 1))
        buf37 = empty_strided_cuda((4, 1024, 23, 23), (541696, 529, 23, 1),
            torch.float32)
        triton_poi_fused_cat_17[grid(2166784)](buf18, buf36, primals_21,
            buf37, 2166784, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf36
        del primals_21
        buf38 = extern_kernels.convolution(buf37, primals_22, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 512, 21, 21), (225792, 441, 21, 1))
        buf39 = buf38
        del buf38
        triton_poi_fused_convolution_relu_18[grid(903168)](buf39,
            primals_23, 903168, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_23
        buf40 = extern_kernels.convolution(buf39, primals_24, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf40, (4, 512, 19, 19), (184832, 361, 19, 1))
        buf41 = empty_strided_cuda((38, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_19[grid(38)](buf41, 38, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf42 = empty_strided_cuda((38, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_20[grid(38)](buf42, 38, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf43 = empty_strided_cuda((38,), (1,), torch.int64)
        triton_poi_fused__to_copy_19[grid(38)](buf43, 38, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf44 = empty_strided_cuda((38,), (1,), torch.int64)
        triton_poi_fused_add_clamp_20[grid(38)](buf44, 38, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf45 = empty_strided_cuda((38,), (1,), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_21[grid(38)](buf45,
            38, XBLOCK=64, num_warps=1, num_stages=1)
        buf47 = empty_strided_cuda((38, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_21[grid(38)](buf47,
            38, XBLOCK=64, num_warps=1, num_stages=1)
        buf49 = empty_strided_cuda((4, 512, 38, 38), (739328, 1444, 38, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_22[grid
            (2957312)](buf41, buf43, buf40, primals_25, buf44, buf45,
            buf42, buf47, buf49, 2957312, XBLOCK=1024, num_warps=4,
            num_stages=1)
        buf50 = extern_kernels.convolution(buf49, primals_26, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf50, (4, 256, 37, 37), (350464, 1369, 37, 1))
        buf51 = empty_strided_cuda((4, 512, 37, 37), (700928, 1369, 37, 1),
            torch.float32)
        triton_poi_fused_cat_23[grid(2803712)](buf12, buf50, primals_27,
            buf51, 2803712, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf50
        del primals_27
        buf52 = extern_kernels.convolution(buf51, primals_28, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf52, (4, 256, 35, 35), (313600, 1225, 35, 1))
        buf53 = buf52
        del buf52
        triton_poi_fused_convolution_relu_24[grid(1254400)](buf53,
            primals_29, 1254400, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_29
        buf54 = extern_kernels.convolution(buf53, primals_30, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf54, (4, 256, 33, 33), (278784, 1089, 33, 1))
        buf55 = empty_strided_cuda((66, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_25[grid(66)](buf55, 66, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf56 = empty_strided_cuda((66, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_26[grid(66)](buf56, 66, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf57 = empty_strided_cuda((66,), (1,), torch.int64)
        triton_poi_fused__to_copy_25[grid(66)](buf57, 66, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf58 = empty_strided_cuda((66,), (1,), torch.int64)
        triton_poi_fused_add_clamp_26[grid(66)](buf58, 66, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf59 = empty_strided_cuda((66,), (1,), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_27[grid(66)](buf59,
            66, XBLOCK=128, num_warps=4, num_stages=1)
        buf61 = empty_strided_cuda((66, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_27[grid(66)](buf61,
            66, XBLOCK=128, num_warps=4, num_stages=1)
        buf63 = empty_strided_cuda((4, 256, 66, 66), (1115136, 4356, 66, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_28[grid
            (4460544)](buf55, buf57, buf54, primals_31, buf58, buf59,
            buf56, buf61, buf63, 4460544, XBLOCK=1024, num_warps=4,
            num_stages=1)
        buf64 = extern_kernels.convolution(buf63, primals_32, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf64, (4, 128, 65, 65), (540800, 4225, 65, 1))
        buf65 = empty_strided_cuda((4, 256, 65, 65), (1081600, 4225, 65, 1),
            torch.float32)
        triton_poi_fused_cat_29[grid(4326400)](buf6, buf64, primals_33,
            buf65, 4326400, XBLOCK=512, num_warps=8, num_stages=1)
        del buf64
        del primals_33
        buf66 = extern_kernels.convolution(buf65, primals_34, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf66, (4, 128, 63, 63), (508032, 3969, 63, 1))
        buf67 = buf66
        del buf66
        triton_poi_fused_convolution_relu_30[grid(2032128)](buf67,
            primals_35, 2032128, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_35
        buf68 = extern_kernels.convolution(buf67, primals_36, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf68, (4, 128, 61, 61), (476288, 3721, 61, 1))
        buf69 = empty_strided_cuda((122, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_31[grid(122)](buf69, 122, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf70 = empty_strided_cuda((122, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_32[grid(122)](buf70, 122, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf71 = empty_strided_cuda((122,), (1,), torch.int64)
        triton_poi_fused__to_copy_31[grid(122)](buf71, 122, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf72 = empty_strided_cuda((122,), (1,), torch.int64)
        triton_poi_fused_add_clamp_32[grid(122)](buf72, 122, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf73 = empty_strided_cuda((122,), (1,), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_33[grid(122)](buf73,
            122, XBLOCK=128, num_warps=4, num_stages=1)
        buf75 = empty_strided_cuda((122, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_33[grid(122)](buf75,
            122, XBLOCK=128, num_warps=4, num_stages=1)
        buf77 = empty_strided_cuda((4, 128, 122, 122), (1905152, 14884,
            122, 1), torch.float32)
        triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_34[grid
            (7620608)](buf69, buf71, buf68, primals_37, buf72, buf73,
            buf70, buf75, buf77, 7620608, XBLOCK=1024, num_warps=4,
            num_stages=1)
        buf78 = extern_kernels.convolution(buf77, primals_38, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf78, (4, 64, 121, 121), (937024, 14641, 121, 1))
        buf79 = empty_strided_cuda((4, 128, 121, 121), (1874048, 14641,
            121, 1), torch.float32)
        triton_poi_fused_cat_35[grid(7496192)](buf1, buf78, primals_39,
            buf79, 7496192, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf78
        del primals_39
        buf80 = extern_kernels.convolution(buf79, primals_40, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf80, (4, 64, 119, 119), (906304, 14161, 119, 1))
        buf81 = buf80
        del buf80
        triton_poi_fused_convolution_relu_36[grid(3625216)](buf81,
            primals_41, 3625216, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_41
        buf82 = extern_kernels.convolution(buf81, primals_42, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf82, (4, 64, 117, 117), (876096, 13689, 117, 1))
        buf83 = buf82
        del buf82
        triton_poi_fused_convolution_relu_37[grid(3504384)](buf83,
            primals_43, 3504384, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_43
        buf84 = extern_kernels.convolution(buf83, primals_44, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf84, (4, 4, 117, 117), (54756, 13689, 117, 1))
        buf85 = buf84
        del buf84
        triton_poi_fused_convolution_38[grid(219024)](buf85, primals_45,
            219024, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_45
        buf86 = empty_strided_cuda((4, 128, 61, 61), (491520, 3840, 61, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_39[grid(1905152)](
            buf68, primals_37, buf86, 1905152, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf68
        del primals_37
        buf87 = empty_strided_cuda((4, 256, 33, 33), (294912, 1152, 33, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_40[grid(1115136)](
            buf54, primals_31, buf87, 1115136, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf54
        del primals_31
        buf88 = empty_strided_cuda((4, 512, 19, 19), (184832, 361, 19, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_41[grid(739328)](
            buf40, primals_25, buf88, 739328, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf40
        del primals_25
    return (buf85, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, primals_12, primals_14, primals_16, primals_18,
        primals_20, primals_22, primals_24, primals_26, primals_28,
        primals_30, primals_32, primals_34, primals_36, primals_38,
        primals_40, primals_42, primals_44, buf1, buf3, buf5, buf6, buf7,
        buf9, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21,
        buf23, buf24, buf25, buf26, buf27, buf28, buf31, buf33, buf35,
        buf37, buf39, buf41, buf42, buf43, buf44, buf45, buf47, buf49,
        buf51, buf53, buf55, buf56, buf57, buf58, buf59, buf61, buf63,
        buf65, buf67, buf69, buf70, buf71, buf72, buf73, buf75, buf77,
        buf79, buf81, buf83, buf86, buf87, buf88)


def crop(image, new_shape):
    plus_h, plus_w = 0, 0
    if new_shape[2] % 2 != 0:
        plus_h = 1
    if new_shape[3] % 2 != 0:
        plus_w = 1
    middle_height = image.shape[2] // 2
    middle_weight = image.shape[3] // 2
    go_height = new_shape[2] // 2
    go_weight = new_shape[3] // 2
    cropped_image = image[:, :, middle_height - go_height:middle_height +
        go_height + plus_h, middle_weight - go_weight:middle_weight +
        go_weight + plus_w]
    return cropped_image


class ContractingBlock(nn.Module):

    def __init__(self, input_channel):
        super(ContractingBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=input_channel, out_channels=
            input_channel * 2, kernel_size=(3, 3))
        self.conv2 = nn.Conv2d(input_channel * 2, input_channel * 2,
            kernel_size=(3, 3))
        self.maxpool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.activation = nn.ReLU()

    def forward(self, x):
        x = self.activation(self.conv1(x))
        x = self.activation(self.conv2(x))
        return self.maxpool(x)


class ExpandingBlock(nn.Module):

    def __init__(self, input_channels):
        super(ExpandingBlock, self).__init__()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear',
            align_corners=True)
        self.conv1 = nn.Conv2d(input_channels, input_channels // 2,
            kernel_size=(2, 2))
        self.conv2 = nn.Conv2d(input_channels, input_channels // 2,
            kernel_size=(3, 3))
        self.conv3 = nn.Conv2d(input_channels // 2, input_channels // 2,
            kernel_size=(3, 3))
        self.activation = nn.ReLU()

    def forward(self, skip_con_x, x):
        x = self.upsample(x)
        x = self.conv1(x)
        skip_con_x = crop(skip_con_x, x.shape)
        x = torch.cat([skip_con_x, x], axis=1)
        x = self.conv2(x)
        x = self.activation(x)
        x = self.conv3(x)
        return self.activation(x)


class FeatureMapBlock(nn.Module):

    def __init__(self, input_channels, output_channels):
        super(FeatureMapBlock, self).__init__()
        self.conv = nn.Conv2d(input_channels, output_channels,
            kernel_size=(1, 1))

    def forward(self, x):
        return self.conv(x)


class UnetNew(nn.Module):

    def __init__(self, input_channels, output_channels, hidden_channels=64):
        super(UnetNew, self).__init__()
        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
        self.contract1 = ContractingBlock(hidden_channels)
        self.contract2 = ContractingBlock(hidden_channels * 2)
        self.contract3 = ContractingBlock(hidden_channels * 4)
        self.contract4 = ContractingBlock(hidden_channels * 8)
        self.expanding1 = ExpandingBlock(hidden_channels * 16)
        self.expanding2 = ExpandingBlock(hidden_channels * 8)
        self.expanding3 = ExpandingBlock(hidden_channels * 4)
        self.expanding4 = ExpandingBlock(hidden_channels * 2)
        self.downfeature = FeatureMapBlock(hidden_channels, output_channels)

    def forward(self, input_0):
        primals_1 = self.upfeature.conv.weight
        primals_2 = self.upfeature.conv.bias
        primals_4 = self.contract1.conv1.weight
        primals_5 = self.contract1.conv1.bias
        primals_6 = self.contract1.conv2.weight
        primals_7 = self.contract1.conv2.bias
        primals_8 = self.contract2.conv1.weight
        primals_9 = self.contract2.conv1.bias
        primals_10 = self.contract2.conv2.weight
        primals_11 = self.contract2.conv2.bias
        primals_12 = self.contract3.conv1.weight
        primals_13 = self.contract3.conv1.bias
        primals_14 = self.contract3.conv2.weight
        primals_15 = self.contract3.conv2.bias
        primals_16 = self.contract4.conv1.weight
        primals_17 = self.contract4.conv1.bias
        primals_18 = self.contract4.conv2.weight
        primals_19 = self.contract4.conv2.bias
        primals_20 = self.expanding1.conv1.weight
        primals_21 = self.expanding1.conv1.bias
        primals_22 = self.expanding1.conv2.weight
        primals_23 = self.expanding1.conv2.bias
        primals_24 = self.expanding1.conv3.weight
        primals_25 = self.expanding1.conv3.bias
        primals_26 = self.expanding2.conv1.weight
        primals_27 = self.expanding2.conv1.bias
        primals_28 = self.expanding2.conv2.weight
        primals_29 = self.expanding2.conv2.bias
        primals_30 = self.expanding2.conv3.weight
        primals_31 = self.expanding2.conv3.bias
        primals_32 = self.expanding3.conv1.weight
        primals_33 = self.expanding3.conv1.bias
        primals_34 = self.expanding3.conv2.weight
        primals_35 = self.expanding3.conv2.bias
        primals_36 = self.expanding3.conv3.weight
        primals_37 = self.expanding3.conv3.bias
        primals_38 = self.expanding4.conv1.weight
        primals_39 = self.expanding4.conv1.bias
        primals_40 = self.expanding4.conv2.weight
        primals_41 = self.expanding4.conv2.bias
        primals_42 = self.expanding4.conv3.weight
        primals_43 = self.expanding4.conv3.bias
        primals_44 = self.downfeature.conv.weight
        primals_45 = self.downfeature.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35, primals_36, primals_37, primals_38, primals_39,
            primals_40, primals_41, primals_42, primals_43, primals_44,
            primals_45])
        return output[0]
furkannturkmen/pytorch-CNN-architecture
Unet
false
10,222
[ "MIT" ]
0
6a864811f51409c1526224c288fe608010e0c888
https://github.com/furkannturkmen/pytorch-CNN-architecture/tree/6a864811f51409c1526224c288fe608010e0c888
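Note on the Unet record above: the only hand-written helper the compiled graph has to mirror is `crop`, which center-crops an encoder activation so it can be concatenated with the upsampled decoder activation (the valid, unpadded 3x3 convolutions shrink spatial dims, so the skip connection is always larger). Below is a minimal eager-mode sketch of that shape arithmetic; the tensor sizes are illustrative, chosen to mirror the (4, 512, 28, 28) and (4, 512, 23, 23) buffers in the compiled `call`, not taken from the repo.

import torch

def crop(image, new_shape):
    # Center-crop `image` (N, C, H, W) to the spatial size in `new_shape`,
    # keeping one extra row/column on the high side when the target is odd.
    plus_h, plus_w = 0, 0
    if new_shape[2] % 2 != 0:
        plus_h = 1
    if new_shape[3] % 2 != 0:
        plus_w = 1
    mid_h = image.shape[2] // 2
    mid_w = image.shape[3] // 2
    go_h = new_shape[2] // 2
    go_w = new_shape[3] // 2
    return image[:, :, mid_h - go_h:mid_h + go_h + plus_h,
                 mid_w - go_w:mid_w + go_w + plus_w]

# Encoder features are larger than the upsampled decoder features.
skip = torch.randn(1, 512, 28, 28)     # illustrative encoder activation
decoder = torch.randn(1, 512, 23, 23)  # illustrative upsampled decoder activation
cropped = crop(skip, decoder.shape)
assert cropped.shape[-2:] == decoder.shape[-2:]
fused = torch.cat([cropped, decoder], dim=1)  # channel-wise skip connection
print(fused.shape)  # torch.Size([1, 1024, 23, 23])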
InverseDepthSmoothnessLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/xo/cxopkxo47z5lmvyq2gx2n4exgeyhweclzftd73pco236dokn54dm.py
# Topologically Sorted Source Nodes: [image_dx, abs_1, mean, neg, weights_x], Original ATen: [aten.sub, aten.abs, aten.mean, aten.neg, aten.exp]
# Source node to ATen node mapping:
#   abs_1 => abs_1
#   image_dx => sub_2
#   mean => mean
#   neg => neg
#   weights_x => exp
# Graph fragment:
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_20, %slice_24), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%abs_1, [1], True), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
triton_poi_fused_abs_exp_mean_neg_sub_0 = async_compile.triton('triton_poi_fused_abs_exp_mean_neg_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_exp_mean_neg_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_exp_mean_neg_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = (xindex // 3) % 4
    x2 = (xindex // 12)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (64*x2)), xmask)
    tmp1 = tl.load(in_ptr0 + (1 + x0 + (4*x1) + (64*x2)), xmask)
    tmp4 = tl.load(in_ptr0 + (16 + x0 + (4*x1) + (64*x2)), xmask)
    tmp5 = tl.load(in_ptr0 + (17 + x0 + (4*x1) + (64*x2)), xmask)
    tmp9 = tl.load(in_ptr0 + (32 + x0 + (4*x1) + (64*x2)), xmask)
    tmp10 = tl.load(in_ptr0 + (33 + x0 + (4*x1) + (64*x2)), xmask)
    tmp14 = tl.load(in_ptr0 + (48 + x0 + (4*x1) + (64*x2)), xmask)
    tmp15 = tl.load(in_ptr0 + (49 + x0 + (4*x1) + (64*x2)), xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.abs(tmp6)
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tl_math.abs(tmp11)
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tl_math.abs(tmp16)
    tmp18 = tmp13 + tmp17
    tmp19 = 4.0
    tmp20 = tmp18 / tmp19
    tmp21 = -tmp20
    tmp22 = tl_math.exp(tmp21)
    tl.store(out_ptr0 + (x3), tmp22, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/wl/cwl6yd7l3sndnb22swumccpruxx46plry27po4wlv5ktz7r5sl5b.py
# Topologically Sorted Source Nodes: [image_dy, abs_2, mean_1, neg_1, weights_y], Original ATen: [aten.sub, aten.abs, aten.mean, aten.neg, aten.exp]
# Source node to ATen node mapping:
#   abs_2 => abs_2
#   image_dy => sub_3
#   mean_1 => mean_1
#   neg_1 => neg_1
#   weights_y => exp_1
# Graph fragment:
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_27, %slice_31), kwargs = {})
#   %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_3,), kwargs = {})
#   %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%abs_2, [1], True), kwargs = {})
#   %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {})
#   %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
triton_poi_fused_abs_exp_mean_neg_sub_1 = async_compile.triton('triton_poi_fused_abs_exp_mean_neg_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_exp_mean_neg_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_exp_mean_neg_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 12
    x1 = (xindex // 12)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
    tmp1 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
    tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
    tmp5 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
    tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
    tmp10 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
    tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
    tmp15 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.abs(tmp6)
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tl_math.abs(tmp11)
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tl_math.abs(tmp16)
    tmp18 = tmp13 + tmp17
    tmp19 = 4.0
    tmp20 = tmp18 / tmp19
    tmp21 = -tmp20
    tmp22 = tl_math.exp(tmp21)
    tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_8/inductor_cache/o7/co77hkmtzy5ubvbrmxqqwu5n4mggkr3ljkpuheg7zy4q2bdaspun.py
# Topologically Sorted Source Nodes: [idepth_dx, image_dx, abs_1, mean, neg, weights_x, mul, smoothness_x, mean_2, idepth_dy, image_dy, abs_2, mean_1, neg_1, weights_y, mul_1, smoothness_y, mean_3, add], Original ATen: [aten.sub, aten.abs, aten.mean, aten.neg, aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
#   abs_1 => abs_1
#   abs_2 => abs_2
#   add => add
#   idepth_dx => sub
#   idepth_dy => sub_1
#   image_dx => sub_2
#   image_dy => sub_3
#   mean => mean
#   mean_1 => mean_1
#   mean_2 => mean_2
#   mean_3 => mean_3
#   mul => mul
#   mul_1 => mul_1
#   neg => neg
#   neg_1 => neg_1
#   smoothness_x => abs_3
#   smoothness_y => abs_4
#   weights_x => exp
#   weights_y => exp_1
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_4, %slice_8), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_20, %slice_24), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%abs_1, [1], True), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %exp), kwargs = {})
#   %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul,), kwargs = {})
#   %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_3,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_11, %slice_15), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_27, %slice_31), kwargs = {})
#   %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_3,), kwargs = {})
#   %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%abs_2, [1], True), kwargs = {})
#   %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {})
#   %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %exp_1), kwargs = {})
#   %abs_4 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_1,), kwargs = {})
#   %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_4,), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_2, %mean_3), kwargs = {})
triton_per_fused_abs_add_exp_mean_mul_neg_sub_2 = async_compile.triton('triton_per_fused_abs_add_exp_mean_mul_neg_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_exp_mean_mul_neg_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_exp_mean_mul_neg_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 192
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = rindex < rnumel
    r0 = rindex % 3
    r5 = (rindex // 3)
    r3 = (rindex // 48)
    r4 = rindex % 12
    r6 = (rindex // 12)
    tmp0 = tl.load(in_ptr0 + (r0 + (4*r5)), rmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + (1 + r0 + (4*r5)), rmask, other=0.0)
    tmp3 = tl.load(in_ptr1 + (r4 + (12*r3)), rmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.load(in_ptr0 + (r4 + (16*r6)), rmask, other=0.0)
    tmp11 = tl.load(in_ptr0 + (4 + r4 + (16*r6)), rmask, other=0.0)
    tmp13 = tl.load(in_ptr2 + (r4 + (12*r3)), rmask, eviction_policy='evict_last', other=0.0)
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp5 = tl_math.abs(tmp4)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.where(rmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp12 = tmp10 - tmp11
    tmp14 = tmp12 * tmp13
    tmp15 = tl_math.abs(tmp14)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.where(rmask, tmp16, 0)
    tmp19 = tl.sum(tmp18, 1)[:, None]
    tmp20 = 192.0
    tmp21 = tmp9 / tmp20
    tmp22 = tmp19 / tmp20
    tmp23 = tmp21 + tmp22
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 3), (12, 48, 3, 1), torch.float32)
        # Topologically Sorted Source Nodes: [image_dx, abs_1, mean, neg, weights_x], Original ATen: [aten.sub, aten.abs, aten.mean, aten.neg, aten.exp]
        stream0 = get_raw_stream(0)
        triton_poi_fused_abs_exp_mean_neg_sub_0.run(arg1_1, buf0, 48, grid=grid(48), stream=stream0)
        buf2 = empty_strided_cuda((4, 1, 3, 4), (12, 48, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [image_dy, abs_2, mean_1, neg_1, weights_y], Original ATen: [aten.sub, aten.abs, aten.mean, aten.neg, aten.exp]
        triton_poi_fused_abs_exp_mean_neg_sub_1.run(arg1_1, buf2, 48, grid=grid(48), stream=stream0)
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [idepth_dx, image_dx, abs_1, mean, neg, weights_x, mul, smoothness_x, mean_2, idepth_dy, image_dy, abs_2, mean_1, neg_1, weights_y, mul_1, smoothness_y, mean_3, add], Original ATen: [aten.sub, aten.abs, aten.mean, aten.neg, aten.exp, aten.mul, aten.add]
        triton_per_fused_abs_add_exp_mean_mul_neg_sub_2.run(buf4, arg0_1, buf0, buf2, 1, 192, grid=grid(1), stream=stream0)
        del arg0_1
        del buf0
        del buf2
    return (buf4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_abs_exp_mean_neg_sub_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = xindex // 3 % 4
    x2 = xindex // 12
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 64 * x2), xmask)
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (17 + x0 + 4 * x1 + 64 * x2), xmask)
    tmp9 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), xmask)
    tmp10 = tl.load(in_ptr0 + (33 + x0 + 4 * x1 + 64 * x2), xmask)
    tmp14 = tl.load(in_ptr0 + (48 + x0 + 4 * x1 + 64 * x2), xmask)
    tmp15 = tl.load(in_ptr0 + (49 + x0 + 4 * x1 + 64 * x2), xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.abs(tmp6)
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tl_math.abs(tmp11)
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tl_math.abs(tmp16)
    tmp18 = tmp13 + tmp17
    tmp19 = 4.0
    tmp20 = tmp18 / tmp19
    tmp21 = -tmp20
    tmp22 = tl_math.exp(tmp21)
    tl.store(out_ptr0 + x3, tmp22, xmask)


@triton.jit
def triton_poi_fused_abs_exp_mean_neg_sub_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 12
    x1 = xindex // 12
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
    tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp10 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
    tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp15 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.abs(tmp6)
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tl_math.abs(tmp11)
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tl_math.abs(tmp16)
    tmp18 = tmp13 + tmp17
    tmp19 = 4.0
    tmp20 = tmp18 / tmp19
    tmp21 = -tmp20
    tmp22 = tl_math.exp(tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)


@triton.jit
def triton_per_fused_abs_add_exp_mean_mul_neg_sub_2(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    rnumel = 192
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r0 = rindex % 3
    r5 = rindex // 3
    r3 = rindex // 48
    r4 = rindex % 12
    r6 = rindex // 12
    tmp0 = tl.load(in_ptr0 + (r0 + 4 * r5), rmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + (1 + r0 + 4 * r5), rmask, other=0.0)
    tmp3 = tl.load(in_ptr1 + (r4 + 12 * r3), rmask, eviction_policy=
        'evict_last', other=0.0)
    tmp10 = tl.load(in_ptr0 + (r4 + 16 * r6), rmask, other=0.0)
    tmp11 = tl.load(in_ptr0 + (4 + r4 + 16 * r6), rmask, other=0.0)
    tmp13 = tl.load(in_ptr2 + (r4 + 12 * r3), rmask, eviction_policy=
        'evict_last', other=0.0)
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp5 = tl_math.abs(tmp4)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.where(rmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp12 = tmp10 - tmp11
    tmp14 = tmp12 * tmp13
    tmp15 = tl_math.abs(tmp14)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.where(rmask, tmp16, 0)
    tmp19 = tl.sum(tmp18, 1)[:, None]
    tmp20 = 192.0
    tmp21 = tmp9 / tmp20
    tmp22 = tmp19 / tmp20
    tmp23 = tmp21 + tmp22
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 3), (12, 48, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_abs_exp_mean_neg_sub_0[grid(48)](arg1_1, buf0, 48,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 1, 3, 4), (12, 48, 4, 1), torch.float32)
        triton_poi_fused_abs_exp_mean_neg_sub_1[grid(48)](arg1_1, buf2, 48,
            XBLOCK=64, num_warps=1, num_stages=1)
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf1
        del buf1
        triton_per_fused_abs_add_exp_mean_mul_neg_sub_2[grid(1)](buf4,
            arg0_1, buf0, buf2, 1, 192, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del buf0
        del buf2
    return buf4,


def _gradient_x(img: 'torch.Tensor') ->torch.Tensor:
    assert len(img.shape) == 4, img.shape
    return img[:, :, :, :-1] - img[:, :, :, 1:]


def _gradient_y(img: 'torch.Tensor') ->torch.Tensor:
    assert len(img.shape) == 4, img.shape
    return img[:, :, :-1, :] - img[:, :, 1:, :]


def inverse_depth_smoothness_loss(idepth: 'torch.Tensor', image:
    'torch.Tensor') ->torch.Tensor:
    """Computes image-aware inverse depth smoothness loss.

    See :class:`~kornia.losses.InvDepthSmoothnessLoss` for details.
    """
    if not torch.is_tensor(idepth):
        raise TypeError('Input idepth type is not a torch.Tensor. Got {}'.
            format(type(idepth)))
    if not torch.is_tensor(image):
        raise TypeError('Input image type is not a torch.Tensor. Got {}'.
            format(type(image)))
    if not len(idepth.shape) == 4:
        raise ValueError('Invalid idepth shape, we expect BxCxHxW. Got: {}'
            .format(idepth.shape))
    if not len(image.shape) == 4:
        raise ValueError('Invalid image shape, we expect BxCxHxW. Got: {}'.
            format(image.shape))
    if not idepth.shape[-2:] == image.shape[-2:]:
        raise ValueError('idepth and image shapes must be the same. Got: {}'
            .format(idepth.shape))
    if not idepth.device == image.device:
        raise ValueError('idepth and image must be in the same device. Got: {}'
            .format(idepth.device))
    if not idepth.dtype == image.dtype:
        raise ValueError('idepth and image must be in the same dtype. Got: {}'
            .format(idepth.dtype))
    idepth_dx: 'torch.Tensor' = _gradient_x(idepth)
    idepth_dy: 'torch.Tensor' = _gradient_y(idepth)
    image_dx: 'torch.Tensor' = _gradient_x(image)
    image_dy: 'torch.Tensor' = _gradient_y(image)
    weights_x: 'torch.Tensor' = torch.exp(-torch.mean(torch.abs(image_dx),
        dim=1, keepdim=True))
    weights_y: 'torch.Tensor' = torch.exp(-torch.mean(torch.abs(image_dy),
        dim=1, keepdim=True))
    smoothness_x: 'torch.Tensor' = torch.abs(idepth_dx * weights_x)
    smoothness_y: 'torch.Tensor' = torch.abs(idepth_dy * weights_y)
    return torch.mean(smoothness_x) + torch.mean(smoothness_y)


class InverseDepthSmoothnessLossNew(nn.Module):
    """Criterion that computes image-aware inverse depth smoothness loss.

    .. math::

        \\text{loss} = \\left | \\partial_x d_{ij} \\right | e^{-\\left \\|
        \\partial_x I_{ij} \\right \\|} + \\left | \\partial_y d_{ij}
        \\right | e^{-\\left \\| \\partial_y I_{ij} \\right \\|}

    Shape:
        - Inverse Depth: :math:`(N, 1, H, W)`
        - Image: :math:`(N, 3, H, W)`
        - Output: scalar

    Examples::

        >>> idepth = torch.rand(1, 1, 4, 5)
        >>> image = torch.rand(1, 3, 4, 5)
        >>> smooth = kornia.losses.DepthSmoothnessLoss()
        >>> loss = smooth(idepth, image)
    """

    def __init__(self) ->None:
        super(InverseDepthSmoothnessLossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
IEM-Computer-Vision/kornia
InverseDepthSmoothnessLoss
false
9,297
[ "ECL-2.0", "Apache-2.0" ]
0
f98bd9a2158a6e59cda076d55d476acf13f4e0af
https://github.com/IEM-Computer-Vision/kornia/tree/f98bd9a2158a6e59cda076d55d476acf13f4e0af
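For reference, the fused kernels in the InverseDepthSmoothnessLoss record compute exactly the eager formula carried in the record's own `inverse_depth_smoothness_loss`: two pointwise kernels build the exp(-mean|dI|) edge-aware weights along x and y, and one persistent reduction averages the weighted inverse-depth gradients, dividing both sums by 192 (the 4*4*4*3 gradient elements for the 4x4x4x4 inputs). A minimal eager sketch, stripped of the input validation and assuming the same toy shapes, runs on CPU:

import torch

def inverse_depth_smoothness_loss_eager(idepth, image):
    # Forward differences along width (x) and height (y).
    idepth_dx = idepth[:, :, :, :-1] - idepth[:, :, :, 1:]
    idepth_dy = idepth[:, :, :-1, :] - idepth[:, :, 1:, :]
    image_dx = image[:, :, :, :-1] - image[:, :, :, 1:]
    image_dy = image[:, :, :-1, :] - image[:, :, 1:, :]
    # Edge-aware weights: small where the image has strong gradients.
    weights_x = torch.exp(-image_dx.abs().mean(dim=1, keepdim=True))
    weights_y = torch.exp(-image_dy.abs().mean(dim=1, keepdim=True))
    return (idepth_dx * weights_x).abs().mean() + (idepth_dy * weights_y).abs().mean()

idepth = torch.rand(4, 4, 4, 4)
image = torch.rand(4, 4, 4, 4)
print(inverse_depth_smoothness_loss_eager(idepth, image))  # scalar loss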
PlanarFlow
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/il/cil5hpoi7ygpsypgim2u7imeqozhugxjb3qehifbgnc6wf4rjrsp.py
# Topologically Sorted Source Nodes: [uw, softplus, muw, sub, mul, pow_1, sum_1, truediv, uhat, mv, zwb, psi, psi_u, add_4, abs_1, add_5, logdet_jacobian], Original ATen: [aten.dot, aten.softplus, aten.add, aten.sub, aten.mul, aten.pow, aten.sum, aten.div, aten.mv, aten.abs, aten.log]
# Source node to ATen node mapping:
#   abs_1 => abs_1
#   add_4 => add_4
#   add_5 => add_5
#   logdet_jacobian => log
#   mul => mul_1
#   muw => add
#   mv => mul_2, sum_3
#   pow_1 => pow_1
#   psi => mul_4
#   psi_u => mul_5, sum_4
#   softplus => exp, gt, log1p, where
#   sub => sub
#   sum_1 => sum_2
#   truediv => div
#   uhat => add_1
#   uw => mul, sum_1
#   zwb => add_2
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
#   %sum_1 : [num_users=4] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sum_1,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 20), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %sum_1, %log1p), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, -1), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %sum_1), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_2), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %sum_2), kwargs = {})
#   %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %div), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %primals_2), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, %primals_4), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_3), kwargs = {})
#   %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %add_1), kwargs = {})
#   %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [1]), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 1), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%add_4,), kwargs = {})
#   %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_1, 1e-08), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_5,), kwargs = {})
triton_per_fused_abs_add_div_dot_log_mul_mv_pow_softplus_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_div_dot_log_mul_mv_pow_softplus_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=(8,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_dot_log_mul_mv_pow_softplus_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 15, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_dot_log_mul_mv_pow_softplus_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tl.load(in_ptr1 + (r0), None)
    tmp10 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + (0))
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (1))
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp19 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr1 + (2))
    tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
    tmp24 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + (3))
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp29 = tl.load(in_ptr3 + (0))
    tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
    tmp37 = tl.load(in_ptr0 + (0))
    tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
    tmp52 = tl.load(in_ptr0 + (1))
    tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
    tmp60 = tl.load(in_ptr0 + (2))
    tmp61 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
    tmp68 = tl.load(in_ptr0 + (3))
    tmp69 = tl.broadcast_to(tmp68, [XBLOCK, RBLOCK])
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tmp1 * tmp1
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp13 = tmp10 * tmp12
    tmp17 = tmp14 * tmp16
    tmp18 = tmp13 + tmp17
    tmp22 = tmp19 * tmp21
    tmp23 = tmp18 + tmp22
    tmp27 = tmp24 * tmp26
    tmp28 = tmp23 + tmp27
    tmp31 = tmp28 + tmp30
    tmp32 = libdevice.tanh(tmp31)
    tmp33 = tmp32 * tmp32
    tmp34 = 1.0
    tmp35 = tmp34 - tmp33
    tmp36 = tmp35 * tmp12
    tmp39 = 20.0
    tmp40 = tmp5 > tmp39
    tmp41 = tl_math.exp(tmp5)
    tmp42 = libdevice.log1p(tmp41)
    tmp43 = tl.where(tmp40, tmp5, tmp42)
    tmp44 = -1.0
    tmp45 = tmp43 + tmp44
    tmp46 = tmp45 - tmp5
    tmp47 = tmp46 * tmp12
    tmp48 = tmp47 / tmp9
    tmp49 = tmp38 + tmp48
    tmp50 = tmp36 * tmp49
    tmp51 = tmp35 * tmp16
    tmp54 = tmp46 * tmp16
    tmp55 = tmp54 / tmp9
    tmp56 = tmp53 + tmp55
    tmp57 = tmp51 * tmp56
    tmp58 = tmp50 + tmp57
    tmp59 = tmp35 * tmp21
    tmp62 = tmp46 * tmp21
    tmp63 = tmp62 / tmp9
    tmp64 = tmp61 + tmp63
    tmp65 = tmp59 * tmp64
    tmp66 = tmp58 + tmp65
    tmp67 = tmp35 * tmp26
    tmp70 = tmp46 * tmp26
    tmp71 = tmp70 / tmp9
    tmp72 = tmp69 + tmp71
    tmp73 = tmp67 * tmp72
    tmp74 = tmp66 + tmp73
    tmp75 = tmp74 + tmp34
    tmp76 = tl_math.abs(tmp75)
    tmp77 = 1e-08
    tmp78 = tmp76 + tmp77
    tmp79 = tl_math.log(tmp78)
    tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp31, None)
    tl.store(in_out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp79, None)
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None)
    tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/kk/ckkz4nmmavawr3cwhzxvsdtcrjydick2txrs43laojgo3e6wua2r.py
# Topologically Sorted Source Nodes: [mul_1, f_z], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
#   f_z => add_3
#   mul_1 => mul_3
# Graph fragment:
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {})
triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + (0))
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp12 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr4 + (0))
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
    tmp18 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
    tmp4 = 20.0
    tmp5 = tmp3 > tmp4
    tmp6 = tl_math.exp(tmp3)
    tmp7 = libdevice.log1p(tmp6)
    tmp8 = tl.where(tmp5, tmp3, tmp7)
    tmp9 = -1.0
    tmp10 = tmp8 + tmp9
    tmp11 = tmp10 - tmp3
    tmp13 = tmp11 * tmp12
    tmp16 = tmp13 / tmp15
    tmp17 = tmp1 + tmp16
    tmp19 = libdevice.tanh(tmp18)
    tmp20 = tmp17 * tmp19
    tmp21 = tmp0 + tmp20
    tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, ), (1, ))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
        buf4 = empty_strided_cuda((4, ), (1, ), torch.float32)
        buf5 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [uw, softplus, muw, sub, mul, pow_1, sum_1, truediv, uhat, mv, zwb, psi, psi_u, add_4, abs_1, add_5, logdet_jacobian], Original ATen: [aten.dot, aten.softplus, aten.add, aten.sub, aten.mul, aten.pow, aten.sum, aten.div, aten.mv, aten.abs, aten.log]
        stream0 = get_raw_stream(0)
        triton_per_fused_abs_add_div_dot_log_mul_mv_pow_softplus_sub_sum_0.run(buf5, primals_1, primals_2, primals_3, primals_4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul_1, f_z], Original ATen: [aten.mul, aten.add]
        triton_poi_fused_add_mul_1.run(primals_3, primals_1, buf0, primals_2, buf1, buf2, buf3, 16, grid=grid(16), stream=stream0)
        del buf0
        del buf1
        del buf2
    return (buf3, buf5, primals_1, primals_2, primals_3, primals_4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_add_div_dot_log_mul_mv_pow_softplus_sub_sum_0(
    in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp10 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + 0)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + 1)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp19 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr1 + 2)
    tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
    tmp24 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + 3)
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp29 = tl.load(in_ptr3 + 0)
    tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
    tmp37 = tl.load(in_ptr0 + 0)
    tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
    tmp52 = tl.load(in_ptr0 + 1)
    tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
    tmp60 = tl.load(in_ptr0 + 2)
    tmp61 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
    tmp68 = tl.load(in_ptr0 + 3)
    tmp69 = tl.broadcast_to(tmp68, [XBLOCK, RBLOCK])
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tmp1 * tmp1
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp13 = tmp10 * tmp12
    tmp17 = tmp14 * tmp16
    tmp18 = tmp13 + tmp17
    tmp22 = tmp19 * tmp21
    tmp23 = tmp18 + tmp22
    tmp27 = tmp24 * tmp26
    tmp28 = tmp23 + tmp27
    tmp31 = tmp28 + tmp30
    tmp32 = libdevice.tanh(tmp31)
    tmp33 = tmp32 * tmp32
    tmp34 = 1.0
    tmp35 = tmp34 - tmp33
    tmp36 = tmp35 * tmp12
    tmp39 = 20.0
    tmp40 = tmp5 > tmp39
    tmp41 = tl_math.exp(tmp5)
    tmp42 = libdevice.log1p(tmp41)
    tmp43 = tl.where(tmp40, tmp5, tmp42)
    tmp44 = -1.0
    tmp45 = tmp43 + tmp44
    tmp46 = tmp45 - tmp5
    tmp47 = tmp46 * tmp12
    tmp48 = tmp47 / tmp9
    tmp49 = tmp38 + tmp48
    tmp50 = tmp36 * tmp49
    tmp51 = tmp35 * tmp16
    tmp54 = tmp46 * tmp16
    tmp55 = tmp54 / tmp9
    tmp56 = tmp53 + tmp55
    tmp57 = tmp51 * tmp56
    tmp58 = tmp50 + tmp57
    tmp59 = tmp35 * tmp21
    tmp62 = tmp46 * tmp21
    tmp63 = tmp62 / tmp9
    tmp64 = tmp61 + tmp63
    tmp65 = tmp59 * tmp64
    tmp66 = tmp58 + tmp65
    tmp67 = tmp35 * tmp26
    tmp70 = tmp46 * tmp26
    tmp71 = tmp70 / tmp9
    tmp72 = tmp69 + tmp71
    tmp73 = tmp67 * tmp72
    tmp74 = tmp66 + tmp73
    tmp75 = tmp74 + tmp34
    tmp76 = tl_math.abs(tmp75)
    tmp77 = 1e-08
    tmp78 = tmp76 + tmp77
    tmp79 = tl_math.log(tmp78)
    tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp31, None)
    tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp79, None)
    tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
    tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None)


@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + 0)
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp12 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr4 + 0)
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
    tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
    tmp4 = 20.0
    tmp5 = tmp3 > tmp4
    tmp6 = tl_math.exp(tmp3)
    tmp7 = libdevice.log1p(tmp6)
    tmp8 = tl.where(tmp5, tmp3, tmp7)
    tmp9 = -1.0
    tmp10 = tmp8 + tmp9
    tmp11 = tmp10 - tmp3
    tmp13 = tmp11 * tmp12
    tmp16 = tmp13 / tmp15
    tmp17 = tmp1 + tmp16
    tmp19 = libdevice.tanh(tmp18)
    tmp20 = tmp17 * tmp19
    tmp21 = tmp0 + tmp20
    tl.store(out_ptr0 + x2, tmp21, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = empty_strided_cuda((4,), (1,), torch.float32)
        buf4 = empty_strided_cuda((4,), (1,), torch.float32)
        buf5 = buf4
        del buf4
        get_raw_stream(0)
        triton_per_fused_abs_add_div_dot_log_mul_mv_pow_softplus_sub_sum_0[grid
            (1)](buf5, primals_1, primals_2, primals_3, primals_4, buf0,
            buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_1[grid(16)](primals_3, primals_1, buf0,
            primals_2, buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del buf0
        del buf1
        del buf2
    return buf3, buf5, primals_1, primals_2, primals_3, primals_4


class PlanarFlowNew(nn.Module):
    """Planar normalizing flow [Rezende & Mohamed 2015].

    Provides a tighter bound on the ELBO by giving more expressive power to
    the approximate distribution, such as by introducing covariance between
    terms.

    @param in_features: integer number of input dimensions. this is often
                        the dimensionality of the latent variables
    """

    def __init__(self, in_features):
        super(PlanarFlowNew, self).__init__()
        self.u = nn.Parameter(torch.randn(in_features))
        self.w = nn.Parameter(torch.randn(in_features))
        self.b = nn.Parameter(torch.ones(1))

    def forward(self, input_0):
        primals_1 = self.u
        primals_2 = self.w
        primals_4 = self.b
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0], output[1]
BratChar/variational-item-response-theory-public
PlanarFlow
false
13,420
[ "MIT" ]
52
12862157e99506a0ed7018f1b8a485d4e61fb5bf
https://github.com/BratChar/variational-item-response-theory-public/tree/12862157e99506a0ed7018f1b8a485d4e61fb5bf
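A minimal usage sketch for PlanarFlowNew as compiled above (an illustration, not part of the record): it assumes a CUDA device, since the generated Triton kernels are CUDA-only, and in_features=4 to match the shape asserts in call().

import torch

# PlanarFlowNew is the class defined in the optimised code above.
flow = PlanarFlowNew(in_features=4).cuda()
z = torch.randn(4, 4, device='cuda')  # batch of 4 latent vectors, matching assert_size_stride
f_z, log_det = flow(z)                # transformed samples and a (4,)-shaped log-det term
print(f_z.shape, log_det.shape)       # torch.Size([4, 4]) torch.Size([4])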
NaiveGate
import torch
import torch.nn as nn
import torch.nn.functional as F


class NaiveGate(nn.Module):
    """
    A naive gate implementation that defines the standard behavior of the gate
    which determines which experts the tokens are going to.
    Both the indices and the score, or confidence, are output to the parent
    module.
    The load-balance strategies are also designed to be implemented within the
    `Gate` module.
    """

    def __init__(self, d_model, num_expert, world_size, top_k=2):
        super().__init__()
        self.gate = nn.Linear(d_model, num_expert * world_size)
        self.top_k = top_k

    def forward(self, inp):
        """
        The naive implementation simply calculates the top-k of a linear
        layer's output.
        """
        gate = self.gate(inp)
        gate_top_k_val, gate_top_k_idx = torch.topk(gate, k=self.top_k,
            dim=-1, largest=True, sorted=False)
        gate_top_k_val = gate_top_k_val.view(-1, self.top_k)
        gate_score = F.softmax(gate_top_k_val, dim=-1).unsqueeze(1)
        gate_top_k_idx = gate_top_k_idx.view(-1)
        return gate_top_k_idx, gate_score


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'num_expert': 4, 'world_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 2
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = tmp0 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp1 - tmp3
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp2 - tmp3
    tmp9 = tl_math.exp(tmp8)
    tmp10 = tmp7 + tmp9
    tmp11 = tmp5 / tmp10
    tl.store(out_ptr0 + x2, tmp11, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (16, 4), (4, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = torch.ops.aten.topk.default(reinterpret_tensor(buf0, (4, 4,
            4, 16), (256, 64, 16, 1), 0), 2, -1, True, False)
        del buf0
        buf2 = buf1[0]
        buf3 = buf1[1]
        del buf1
        buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(128)](buf2, buf4, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf2
    return reinterpret_tensor(buf3, (128,), (1,), 0), reinterpret_tensor(buf4,
        (64, 1, 2), (2, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4),
        (4, 1), 0), buf3, buf4


class NaiveGateNew(nn.Module):
    """
    A naive gate implementation that defines the standard behavior of the gate
    which determines which experts the tokens are going to.
    Both the indices and the score, or confidence, are output to the parent
    module.
    The load-balance strategies are also designed to be implemented within the
    `Gate` module.
    """

    def __init__(self, d_model, num_expert, world_size, top_k=2):
        super().__init__()
        self.gate = nn.Linear(d_model, num_expert * world_size)
        self.top_k = top_k

    def forward(self, input_0):
        primals_1 = self.gate.weight
        primals_2 = self.gate.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
whn09/fastmoe
NaiveGate
false
4,533
[ "Apache-2.0" ]
0
d0ffaffc6431abcd3ea6d0287dbf09f8cd727a0a
https://github.com/whn09/fastmoe/tree/d0ffaffc6431abcd3ea6d0287dbf09f8cd727a0a
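A minimal usage sketch for NaiveGateNew as compiled above (an illustration, not part of the record): it assumes a CUDA device, and the input shape and d_model/num_expert/world_size values are fixed by the asserts in call().

import torch

# NaiveGateNew is the class defined in the optimised code above.
gate = NaiveGateNew(d_model=4, num_expert=4, world_size=4).cuda()
inp = torch.rand(4, 4, 4, 4, device='cuda')
top_k_idx, gate_score = gate(inp)
print(top_k_idx.shape)   # torch.Size([128]): 64 tokens x top_k=2 flat expert indices
print(gate_score.shape)  # torch.Size([64, 1, 2]): softmax over each token's top-k logits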
NormLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/tx/ctxgfdomqe65rl3biiiivn2iqu44cw3hqgbqow2ioi4jjo2s4zi6.py # Topologically Sorted Source Nodes: [linalg_norm, ny, linalg_norm_1, nY, diff, getitem, square, getitem_1, mul, mean], Original ATen: [aten.linalg_vector_norm, aten.div, aten.sub, aten.index, aten.pow, aten.mul, aten.mean] # Source node to ATen node mapping: # diff => sub # getitem => index # getitem_1 => index_1 # linalg_norm => pow_1, pow_2, sum_1 # linalg_norm_1 => pow_3, pow_4, sum_2 # mean => mean # mul => mul # nY => div_1 # ny => div # square => pow_5 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, 5.0), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2.0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_4, 5.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%sub, [%arg2_1]), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index, 2), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg3_1, [%arg2_1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_5, %index_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0 = 
async_compile.triton('triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = (rindex // 16) r0 = rindex % 16 r4 = rindex r2 = rindex % 4 tmp0 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr0 + (r2), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (r0 + (64*tmp4)), None) tmp7 = tmp6 * tmp6 tmp8 = tl.load(in_ptr1 + (16 + r0 + (64*tmp4)), None) tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = tl.load(in_ptr1 + (32 + r0 + (64*tmp4)), None) tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tl.load(in_ptr1 + (48 + r0 + (64*tmp4)), None) tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = 0.2 tmp19 = tmp17 * tmp18 tmp20 = tl.load(in_ptr2 + (r0 + (64*tmp4)), None) tmp21 = tmp20 * tmp20 tmp22 = tl.load(in_ptr2 + (16 + r0 + (64*tmp4)), None) tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tl.load(in_ptr2 + (32 + r0 + (64*tmp4)), None) tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tl.load(in_ptr2 + (48 + r0 + (64*tmp4)), None) tmp29 = tmp28 * tmp28 tmp30 = tmp27 + tmp29 tmp31 = libdevice.sqrt(tmp30) tmp32 = tmp31 * tmp18 tmp33 = tmp19 - tmp32 tmp34 = tmp33 * tmp33 tmp36 = tmp35 + tmp1 tmp37 = tmp35 < 0 tmp38 = tl.where(tmp37, tmp36, tmp35) tl.device_assert((0 <= tmp38) & (tmp38 < 4), "index out of bounds: 0 <= tmp38 < 4") tmp40 = tl.load(in_ptr3 + (tmp38), None, 
eviction_policy='evict_last') tmp41 = tmp40.to(tl.float32) tmp42 = tmp34 * tmp41 tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK]) tmp45 = tl.sum(tmp43, 1)[:, None] tmp46 = 64.0 tmp47 = tmp45 / tmp46 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp47, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, ), (1, )) assert_size_stride(arg3_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [linalg_norm, ny, linalg_norm_1, nY, diff, getitem, square, getitem_1, mul, mean], Original ATen: [aten.linalg_vector_norm, aten.div, aten.sub, aten.index, aten.pow, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0.run(buf2, arg2_1, arg0_1, arg1_1, arg3_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) arg3_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 16 r0 = rindex % 16 r2 = rindex % 4 tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (r0 + 64 * tmp4), None) tmp7 = tmp6 * tmp6 tmp8 = tl.load(in_ptr1 + (16 + r0 + 64 * tmp4), None) tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = tl.load(in_ptr1 + (32 + r0 + 64 * tmp4), None) tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tl.load(in_ptr1 + (48 + r0 + 64 * tmp4), None) tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = 0.2 tmp19 = tmp17 * tmp18 tmp20 = tl.load(in_ptr2 + (r0 + 64 * tmp4), None) tmp21 = tmp20 * tmp20 tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * tmp4), None) tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tl.load(in_ptr2 + (32 + r0 + 64 * tmp4), None) tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tl.load(in_ptr2 + (48 + r0 + 64 * tmp4), None) tmp29 = tmp28 * tmp28 tmp30 = tmp27 + tmp29 tmp31 = libdevice.sqrt(tmp30) tmp32 = tmp31 * tmp18 tmp33 = tmp19 - tmp32 tmp34 = tmp33 * tmp33 tmp36 = tmp35 + tmp1 tmp37 = tmp35 < 0 tmp38 = tl.where(tmp37, tmp36, tmp35) tl.device_assert((0 <= tmp38) & (tmp38 < 4), 'index out of bounds: 0 <= tmp38 < 4') tmp40 = tl.load(in_ptr3 + tmp38, None, eviction_policy='evict_last') tmp41 = tmp40.to(tl.float32) tmp42 = tmp34 * tmp41 tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK]) tmp45 = tl.sum(tmp43, 1)[:, None] tmp46 = 64.0 tmp47 = tmp45 / tmp46 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp47, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4,), (1,)) assert_size_stride(arg3_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_div_index_linalg_vector_norm_mean_mul_pow_sub_0[grid (1)](buf2, arg2_1, arg0_1, arg1_1, arg3_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, class NormLossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
acycliq/cellpose
NormLoss
false
12,045
[ "BSD-3-Clause" ]
0
6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
https://github.com/acycliq/cellpose/tree/6d7a3f692206bf791e3ea7bd9524ee6df628ed8a
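A minimal usage sketch for NormLossNew as compiled above (an illustration, not part of the record). The nn.Module source is not shown for this record, so the argument roles are inferred from the ATen graph comments: two tensors whose dim-1 norms are compared, an index vector, and a weight vector; the int64 dtypes follow benchmark_compiled_module.

import torch

# NormLossNew is the class defined in the optimised code above; the
# argument names here are illustrative, inferred from the graph comments.
loss_fn = NormLossNew()
y = torch.rand(4, 4, 4, 4, device='cuda')
Y = torch.rand(4, 4, 4, 4, device='cuda')
idx = torch.randint(0, 4, (4,), device='cuda')        # int64 indices into dim 0
w = torch.ones(4, dtype=torch.int64, device='cuda')   # int64 weights, as in the benchmark
loss = loss_fn(y, Y, idx, w)   # 0-dim tensor; see the graph comments above for the formula
print(loss.shape, loss.item())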
ConvReLUNorm
import torch import torch.cuda import torch.distributed import torch.utils.data import torch.optim class ConvReLUNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0): super(ConvReLUNorm, self).__init__() self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, padding=kernel_size // 2) self.norm = torch.nn.LayerNorm(out_channels) self.dropout = torch.nn.Dropout(dropout) def forward(self, signal): out = torch.nn.functional.relu(self.conv(signal)) out = self.norm(out.transpose(1, 2)).transpose(1, 2) return self.dropout(out) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.cuda import torch.distributed import torch.utils.data import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 + tmp4 tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tmp5 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp4 - tmp13 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp7 - tmp13 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 - tmp13 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp24 / tmp12 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) tl.store(out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x2 + 4 * y3), tmp10, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(16, 4)](buf1, buf2, buf3, primals_4, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del buf2 del buf3 del primals_5 return reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0 ), primals_1, primals_3, primals_4, buf1 class ConvReLUNormNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0): super(ConvReLUNormNew, self).__init__() self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, padding=kernel_size // 2) self.norm = torch.nn.LayerNorm(out_channels) self.dropout = torch.nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.norm.weight primals_5 = self.norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Oreoluwa1234/NeMo
ConvReLUNorm
false
9,707
[ "Apache-2.0" ]
0
b01e3ceed34efe31fd43866685dbdd19a6b30928
https://github.com/Oreoluwa1234/NeMo/tree/b01e3ceed34efe31fd43866685dbdd19a6b30928
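A minimal usage sketch for ConvReLUNormNew as compiled above (an illustration, not part of the record): it assumes a CUDA device, and the compiled call() is specialized to the default kernel_size=1, as the (4, 4, 1) weight assert shows.

import torch

# ConvReLUNormNew is the class defined in the optimised code above.
layer = ConvReLUNormNew(in_channels=4, out_channels=4).cuda()
signal = torch.rand(4, 4, 4, device='cuda')  # (batch, channels, time)
out = layer(signal)                          # Conv1d -> ReLU -> LayerNorm over channels
print(out.shape)                             # torch.Size([4, 4, 4])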
RNNCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] # Source node to ATen node mapping: # combined => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ey/cey6dsgmzj2byupf73e6nwt5fetf5ne2sa57kzcmy7ejvaqhqb72.py # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # hidden => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, 
grid=grid(128), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf3, 64, grid=grid(64), stream=stream0) del primals_4 return (buf2, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2, primals_4, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 return buf2, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf3 class RNNCellNew(nn.Module): def __init__(self, embed_dim, hidden_size, vocab_dim): super().__init__() self.hidden_size = hidden_size self.input2hidden = nn.Linear(embed_dim + hidden_size, hidden_size) def init_hidden(self, batch_size): return torch.zeros(1, batch_size, self.hidden_size) def forward(self, input_0, input_1): primals_3 = self.input2hidden.weight primals_4 = self.input2hidden.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
sdhnshu/HandsOnDeepLearningWithPytorch
RNNCell
false
16,379
[ "MIT" ]
87
2292a952a4cb112b03d5db4048c78bc503eb858d
https://github.com/sdhnshu/HandsOnDeepLearningWithPytorch/tree/2292a952a4cb112b03d5db4048c78bc503eb858d
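A minimal usage sketch for RNNCellNew as compiled above (an illustration, not part of the record): it assumes a CUDA device; embed_dim=4 and hidden_size=4 follow the (4, 8) weight assert, and vocab_dim is unused in the fragment shown, so the value passed here is an arbitrary placeholder.

import torch

# RNNCellNew is the class defined in the optimised code above.
cell = RNNCellNew(embed_dim=4, hidden_size=4, vocab_dim=10).cuda()  # vocab_dim=10 is a placeholder
x = torch.rand(4, 4, 4, device='cuda')  # embedded input
h = torch.rand(4, 4, 4, device='cuda')  # previous hidden state
h_next = cell(x, h)                     # relu(Linear(cat([x, h], dim=2)))
print(h_next.shape)                     # torch.Size([4, 4, 4])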
LogSumPenalty
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/7q/c7qpomy3et3phhwkqc7t644bmcri5fm4hhrdq75vw4jwojia65zu.py # Topologically Sorted Source Nodes: [abs_1, add, log, sum_1], Original ATen: [aten.abs, aten.add, aten.log, aten.sum] # Source node to ATen node mapping: # abs_1 => abs_1 # add => add # log => log # sum_1 => sum_1 # Graph fragment: # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_1, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%log,), kwargs = {}) triton_per_fused_abs_add_log_sum_0 = async_compile.triton('triton_per_fused_abs_add_log_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_log_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_log_sum_0(in_ptr0, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl_math.abs(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [abs_1, add, log, sum_1], Original ATen: [aten.abs, aten.add, aten.log, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_abs_add_log_sum_0.run(arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_log_sum_0(in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl_math.abs(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_abs_add_log_sum_0[grid(1)](arg0_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf0, class LogSumPenaltyNew(Module): def __init__(self, epsilon=1): super(LogSumPenaltyNew, self).__init__() self.epsilon = epsilon def eta_hat(self, w): w = torch.abs(w) return w * (w + self.epsilon) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dlej/adaptive-dropout
LogSumPenalty
false
10,067
[ "MIT" ]
0
0540b2d06f1f97eb5861c6917eec6c086d33dfa8
https://github.com/dlej/adaptive-dropout/tree/0540b2d06f1f97eb5861c6917eec6c086d33dfa8
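A minimal usage sketch for LogSumPenaltyNew as compiled above (an illustration, not part of the record): it assumes a CUDA device, and note that the fused kernel hardcodes the default epsilon=1 as the constant tmp2 = 1.0, so the compiled path is only valid for that value.

import torch

# LogSumPenaltyNew is the class defined in the optimised code above.
penalty = LogSumPenaltyNew(epsilon=1)      # epsilon=1 is baked into the Triton kernel
w = torch.rand(4, 4, 4, 4, device='cuda')
val = penalty(w)                           # sum(log(|w| + 1)) as a 0-dim tensor
print(val.item())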
GAT
import torch import torch.nn as nn import torch.nn.functional as F class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.matmul(input, self.W) N = h.size()[1] batch_size = h.size(0) a_input = torch.cat([h.repeat(1, 1, N).view(batch_size, N * N, -1), h.repeat(1, N, 1)], dim=2).view(batch_size, N, -1, 2 * self. out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GAT(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GAT, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, x, adj): x = F.dropout(x, self.dropout, training=self.training) x = torch.cat([att(x, adj) for att in self.attentions], dim=1) x = F.dropout(x, self.dropout, training=self.training) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 16 x2 = xindex // 128 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + 16 * ((4 * x1 + 64 * x2 + x0) // 64 % 4) + (4 * x1 + x0) % 16 % 4), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x2 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask).to(tl.int1) tmp9 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp10 = tl.load(in_ptr2 + (16 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask).to(tl.int1) tmp16 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr2 + (32 + x0), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask).to(tl.int1) tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp24 = tl.load(in_ptr2 + (48 + x0), xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp41 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (16 + x0), xmask, 
eviction_policy='evict_last' ).to(tl.int1) tmp46 = tl.load(in_ptr4 + (16 + x0), xmask, eviction_policy='evict_last') tmp51 = tl.load(in_ptr3 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp52 = tl.load(in_ptr4 + (32 + x0), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr3 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp58 = tl.load(in_ptr4 + (48 + x0), xmask, eviction_policy='evict_last') tmp74 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp75 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp79 = tl.load(in_ptr5 + (16 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp80 = tl.load(in_ptr6 + (16 + x0), xmask, eviction_policy='evict_last') tmp85 = tl.load(in_ptr5 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp86 = tl.load(in_ptr6 + (32 + x0), xmask, eviction_policy='evict_last') tmp91 = tl.load(in_ptr5 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp92 = tl.load(in_ptr6 + (48 + x0), xmask, eviction_policy='evict_last') tmp108 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp109 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last') tmp113 = tl.load(in_ptr7 + (16 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp114 = tl.load(in_ptr8 + (16 + x0), xmask, eviction_policy='evict_last') tmp119 = tl.load(in_ptr7 + (32 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp120 = tl.load(in_ptr8 + (32 + x0), xmask, eviction_policy='evict_last') tmp125 = tl.load(in_ptr7 + (48 + x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp126 = tl.load(in_ptr8 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp11 = tmp10 * tmp3 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tl.where(tmp8, tmp12, tmp6) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp17 * tmp3 tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tl.where(tmp15, tmp19, tmp6) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp22, tmp26, tmp6) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tmp42 = tmp41 * tmp3 tmp43 = tl.where(tmp40, tmp41, tmp42) tmp44 = tl.where(tmp0, tmp43, tmp6) tmp47 = tmp46 * tmp3 tmp48 = tl.where(tmp45, tmp46, tmp47) tmp49 = tl.where(tmp8, tmp48, tmp6) tmp50 = triton_helpers.maximum(tmp44, tmp49) tmp53 = tmp52 * tmp3 tmp54 = tl.where(tmp51, tmp52, tmp53) tmp55 = tl.where(tmp15, tmp54, tmp6) tmp56 = triton_helpers.maximum(tmp50, tmp55) tmp59 = tmp58 * tmp3 tmp60 = tl.where(tmp57, tmp58, tmp59) tmp61 = tl.where(tmp22, tmp60, tmp6) tmp62 = triton_helpers.maximum(tmp56, tmp61) tmp63 = tmp44 - tmp62 tmp64 = tl_math.exp(tmp63) tmp65 = tmp49 - tmp62 tmp66 = tl_math.exp(tmp65) tmp67 = tmp64 + tmp66 tmp68 = tmp55 - tmp62 tmp69 = tl_math.exp(tmp68) tmp70 = tmp67 + tmp69 tmp71 = tmp61 - tmp62 tmp72 = tl_math.exp(tmp71) tmp73 = tmp70 + tmp72 tmp76 = tmp75 * tmp3 tmp77 = tl.where(tmp74, tmp75, tmp76) tmp78 = tl.where(tmp0, tmp77, tmp6) tmp81 = tmp80 * tmp3 tmp82 = tl.where(tmp79, tmp80, tmp81) tmp83 = tl.where(tmp8, tmp82, tmp6) tmp84 = triton_helpers.maximum(tmp78, tmp83) tmp87 = tmp86 * tmp3 tmp88 
= tl.where(tmp85, tmp86, tmp87) tmp89 = tl.where(tmp15, tmp88, tmp6) tmp90 = triton_helpers.maximum(tmp84, tmp89) tmp93 = tmp92 * tmp3 tmp94 = tl.where(tmp91, tmp92, tmp93) tmp95 = tl.where(tmp22, tmp94, tmp6) tmp96 = triton_helpers.maximum(tmp90, tmp95) tmp97 = tmp78 - tmp96 tmp98 = tl_math.exp(tmp97) tmp99 = tmp83 - tmp96 tmp100 = tl_math.exp(tmp99) tmp101 = tmp98 + tmp100 tmp102 = tmp89 - tmp96 tmp103 = tl_math.exp(tmp102) tmp104 = tmp101 + tmp103 tmp105 = tmp95 - tmp96 tmp106 = tl_math.exp(tmp105) tmp107 = tmp104 + tmp106 tmp110 = tmp109 * tmp3 tmp111 = tl.where(tmp108, tmp109, tmp110) tmp112 = tl.where(tmp0, tmp111, tmp6) tmp115 = tmp114 * tmp3 tmp116 = tl.where(tmp113, tmp114, tmp115) tmp117 = tl.where(tmp8, tmp116, tmp6) tmp118 = triton_helpers.maximum(tmp112, tmp117) tmp121 = tmp120 * tmp3 tmp122 = tl.where(tmp119, tmp120, tmp121) tmp123 = tl.where(tmp15, tmp122, tmp6) tmp124 = triton_helpers.maximum(tmp118, tmp123) tmp127 = tmp126 * tmp3 tmp128 = tl.where(tmp125, tmp126, tmp127) tmp129 = tl.where(tmp22, tmp128, tmp6) tmp130 = triton_helpers.maximum(tmp124, tmp129) tmp131 = tmp112 - tmp130 tmp132 = tl_math.exp(tmp131) tmp133 = tmp117 - tmp130 tmp134 = tl_math.exp(tmp133) tmp135 = tmp132 + tmp134 tmp136 = tmp123 - tmp130 tmp137 = tl_math.exp(tmp136) tmp138 = tmp135 + tmp137 tmp139 = tmp129 - tmp130 tmp140 = tl_math.exp(tmp139) tmp141 = tmp138 + tmp140 tl.store(out_ptr0 + x2, tmp28, xmask) tl.store(out_ptr1 + x2, tmp39, xmask) tl.store(out_ptr2 + x2, tmp62, xmask) tl.store(out_ptr3 + x2, tmp73, xmask) tl.store(out_ptr4 + x2, tmp96, xmask) tl.store(out_ptr5 + x2, tmp107, xmask) tl.store(out_ptr6 + x2, tmp130, xmask) tl.store(out_ptr7 + x2, tmp141, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr5 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp14 = tl.load(in_ptr6 + x4, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr7 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr8 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr9 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp24 = tl.load(in_ptr10 + x4, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr11 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr12 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr13 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp34 = tl.load(in_ptr14 + x4, xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr15 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp41 = tl.load(in_ptr16 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = 
tl.where(tmp0, tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tmp15 = tmp14 * tmp3 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tl.where(tmp0, tmp16, tmp6) tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp22 = tmp20 / tmp21 tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp0, tmp26, tmp6) tmp29 = tmp27 - tmp28 tmp30 = tl_math.exp(tmp29) tmp32 = tmp30 / tmp31 tmp35 = tmp34 * tmp3 tmp36 = tl.where(tmp33, tmp34, tmp35) tmp37 = tl.where(tmp0, tmp36, tmp6) tmp39 = tmp37 - tmp38 tmp40 = tl_math.exp(tmp39) tmp42 = tmp40 / tmp41 tl.store(out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr1 + x3, tmp22, xmask) tl.store(out_ptr2 + x3, tmp32, xmask) tl.store(out_ptr3 + x3, tmp42, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp18 & xmask, other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp30 & xmask, other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tl.full([1], 16, tl.int64) tmp42 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp39 & xmask, other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + x3, tmp52, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (8, 1), (1, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 1), (1, 1)) 
assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (8, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (8, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](buf0, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_gt_2[grid(256)](primals_4, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf10) del primals_5 buf11 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_0[grid(512)](buf10, buf11, 512, XBLOCK=256, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (64, 8), (8, 1), 0), primals_6, out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_7, out=buf19) del primals_7 buf20 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_0[grid(512)](buf19, buf20, 512, XBLOCK=256, num_warps=4, num_stages=1) buf21 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf20, (64, 8), (8, 1), 0), primals_8, out=buf21) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf21, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_9, out=buf28) del primals_9 buf29 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_0[grid(512)](buf28, buf29, 512, XBLOCK=256, num_warps=4, num_stages=1) buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf29, (64, 8), (8, 1), 0), primals_10, out=buf30) buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(64)](buf30, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf23 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf24 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf32 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf33 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) 
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(64)](buf4, buf3, buf2, buf13, buf12, buf22, buf21, buf31, buf30, buf5, buf6, buf14, buf15, buf23, buf24, buf32, buf33, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf34 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_leaky_relu_mul_where_4[grid(256)](buf4, buf3, buf2, buf5, buf6, buf13, buf12, buf14, buf15, buf22, buf21, buf23, buf24, buf31, buf30, buf32, buf33, buf7, buf16, buf25, buf34, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del buf14 del buf15 del buf2 del buf21 del buf23 del buf24 del buf30 del buf32 del buf33 del buf5 del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf0, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf10, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf10 buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18 ) buf26 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf19, buf26, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf19 buf27 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), out=buf27 ) buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf28, buf35, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf28 buf36 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf34, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36 ) buf37 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_6[grid(1024)](buf9, buf18, buf27, buf36, buf37, 1024, XBLOCK=128, num_warps=4, num_stages=1) return (buf37, buf3, buf4, buf7, buf9, buf13, buf16, buf18, buf22, buf25, buf27, buf31, buf34, buf36, reinterpret_tensor(buf35, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf29, (8, 64), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf20, (8, 64), (1, 8), 0), reinterpret_tensor( primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf11, (8, 64), (1, 8), 0), reinterpret_tensor(primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (8, 64), (1, 8), 0), reinterpret_tensor( primals_3, (1, 8), (1, 1), 0)) class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.matmul(input, self.W) N = h.size()[1] batch_size = h.size(0) a_input = torch.cat([h.repeat(1, 1, N).view(batch_size, N * N, -1), h.repeat(1, N, 1)], dim=2).view(batch_size, N, -1, 2 * self. out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATNew(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GATNew, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, input_0, input_1): primals_2 = self.attention_0.W primals_3 = self.attention_0.a primals_5 = self.attention_1.W primals_6 = self.attention_1.a primals_7 = self.attention_2.W primals_8 = self.attention_2.a primals_9 = self.attention_3.W primals_10 = self.attention_3.a primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
daiki-kimura/commonsense-rl
GAT
false
12,256
[ "Apache-2.0" ]
0
5513926957b6501ce9cfa46f77f8f2c1c4892fa5
https://github.com/daiki-kimura/commonsense-rl/tree/5513926957b6501ce9cfa46f77f8f2c1c4892fa5
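For cross-checking the GAT entry above: the fused kernels triton_poi_fused__softmax_leaky_relu_mul_where_3/_4 implement the masked-softmax attention of GraphAttentionLayer.forward, i.e. leaky_relu scores, a -9e15 fill where adj <= 0, and a softmax over dim=1. The sketch below is an illustrative single-head PyTorch reference, not repo code; it takes adj as (B, N, N) for simplicity (the traced graph carries a 4-D adjacency), and negative_slope=4.0 matches the 4.0 constant baked into the kernels, which suggests alpha=4 at trace time.

import torch
import torch.nn.functional as F

def gat_head_reference(h, adj, a, alpha=4.0):
    # h: (B, N, Fp) projected node features; adj: (B, N, N) adjacency;
    # a: (2*Fp, 1) attention vector; alpha is LeakyReLU's negative slope.
    B, N, Fp = h.shape
    a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1),
                         h.repeat(1, N, 1)], dim=2).view(B, N, N, 2 * Fp)
    e = F.leaky_relu(a_input.matmul(a).squeeze(3), negative_slope=alpha)
    att = torch.where(adj > 0, e, torch.full_like(e, -9e15))  # mask non-edges
    att = F.softmax(att, dim=1)                               # dim=1, as upstream
    return att.matmul(h)                                      # h_prime, pre-ELU

The multi-head concat kernel triton_poi_fused_cat_6 then applies the ELU (the expm1 branch in its body) to each head before concatenation, matching the concat=True branch of the source module.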
Normalize
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional as F class Normalize(Module): """Performs :math:`L_p` normalization of inputs over specified dimension. Does: .. math:: v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)} for each subtensor v over dimension dim of input. Each subtensor is flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix norm. With default arguments normalizes over the second dimension with Euclidean norm. Args: p (float): the exponent value in the norm formulation. Default: 2 dim (int): the dimension to reduce. Default: 1 """ def __init__(self, p=2, dim=1): super(Normalize, self).__init__() self.p = p self.dim = dim def forward(self, x): return F.normalize(x, self.p, self.dim, eps=1e-08) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(Module): """Performs :math:`L_p` normalization of inputs over specified dimension. Does: .. math:: v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)} for each subtensor v over dimension dim of input. Each subtensor is flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix norm. With default arguments normalizes over the second dimension with Euclidean norm. Args: p (float): the exponent value in the norm formulation. Default: 2 dim (int): the dimension to reduce. Default: 1 """ def __init__(self, p=2, dim=1): super(NormalizeNew, self).__init__() self.p = p self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
RL-WWW/ISST
Normalize
false
17,820
[ "BSD-3-Clause" ]
5
42b656686fa9660794007a0bc00a7177937410e9
https://github.com/RL-WWW/ISST/tree/42b656686fa9660794007a0bc00a7177937410e9
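As a sanity check on the Normalize entry above (illustrative, not part of the dataset record): the single fused kernel triton_poi_fused_div_0 recomputes, for every output element, the L2 norm of its (n, :, h, w) fiber (the four evict_last loads at strides of 16), clamps it at 1e-8, and divides — exactly F.normalize(x, p=2, dim=1, eps=1e-8). A CPU reference:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt().clamp_min(1e-8)  # per-fiber L2 norm
assert torch.allclose(x / norm, F.normalize(x, p=2, dim=1, eps=1e-8))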
SEModule
import torch import torch.nn as nn import torch.nn.functional as F class Hswish(nn.Module): def __init__(self, inplace=True): super(Hswish, self).__init__() self.inplace = inplace def forward(self, x): return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0 class Hsigmoid(nn.Module): def __init__(self, inplace=True): super(Hsigmoid, self).__init__() self.inplace = inplace def forward(self, x): return F.relu6(1.2 * x + 3.0, inplace=self.inplace) / 6.0 class Activation(nn.Module): def __init__(self, act_type, inplace=True): super(Activation, self).__init__() act_type = act_type.lower() if act_type == 'relu': self.act = nn.ReLU(inplace=inplace) elif act_type == 'relu6': self.act = nn.ReLU6(inplace=inplace) elif act_type == 'sigmoid': raise NotImplementedError elif act_type == 'hard_sigmoid': self.act = Hsigmoid(inplace) elif act_type == 'hard_swish': self.act = Hswish(inplace=inplace) elif act_type == 'leakyrelu': self.act = nn.LeakyReLU(inplace=inplace) else: raise NotImplementedError def forward(self, inputs): return self.act(inputs) class SEModule(nn.Module): def __init__(self, in_channels, reduction=4, name=''): super(SEModule, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True) self.relu1 = Activation(act_type='relu', inplace=True) self.conv2 = nn.Conv2d(in_channels=in_channels // reduction, out_channels=in_channels, kernel_size=1, stride=1, padding=0, bias=True) self.hard_sigmoid = Activation(act_type='hard_sigmoid', inplace=True) def forward(self, inputs): outputs = self.avg_pool(inputs) outputs = self.conv1(outputs) outputs = self.relu1(outputs) outputs = self.conv2(outputs) outputs = self.hard_sigmoid(outputs) outputs = inputs * outputs return outputs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 1.2 tmp5 = tmp3 * tmp4 tmp6 = 3.0 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = 6.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tmp12 = 0.16666666666666666 tmp13 = tmp11 * tmp12 tmp14 = tmp0 * tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused_add_convolution_hardtanh_backward_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.2 tmp4 = tmp2 * tmp3 tmp5 = 3.0 tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tmp9 = 6.0 tmp10 = tmp6 >= tmp9 tmp11 = tmp8 | tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) 
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_convolution_div_hardtanh_mul_2[grid(256)]( primals_1, buf4, primals_5, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) triton_poi_fused_add_convolution_hardtanh_backward_mul_3[grid(16)](buf4 , primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 del primals_5 return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6 class Hswish(nn.Module): def __init__(self, inplace=True): super(Hswish, self).__init__() self.inplace = inplace def forward(self, x): return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0 class Hsigmoid(nn.Module): def __init__(self, inplace=True): super(Hsigmoid, self).__init__() self.inplace = inplace def forward(self, x): return F.relu6(1.2 * x + 3.0, inplace=self.inplace) / 6.0 class Activation(nn.Module): def __init__(self, act_type, inplace=True): super(Activation, self).__init__() act_type = act_type.lower() if act_type == 'relu': self.act = nn.ReLU(inplace=inplace) elif act_type == 'relu6': self.act = nn.ReLU6(inplace=inplace) elif act_type == 'sigmoid': raise NotImplementedError elif act_type == 'hard_sigmoid': self.act = Hsigmoid(inplace) elif act_type == 'hard_swish': self.act = Hswish(inplace=inplace) elif act_type == 'leakyrelu': self.act = nn.LeakyReLU(inplace=inplace) else: raise NotImplementedError def forward(self, inputs): return self.act(inputs) class SEModuleNew(nn.Module): def __init__(self, in_channels, reduction=4, name=''): super(SEModuleNew, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True) self.relu1 = Activation(act_type='relu', inplace=True) self.conv2 = nn.Conv2d(in_channels=in_channels // reduction, out_channels=in_channels, kernel_size=1, stride=1, padding=0, bias=True) self.hard_sigmoid = Activation(act_type='hard_sigmoid', inplace=True) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
manjrekarom/PaddleOCR2Pytorch
SEModule
false
15,996
[ "Apache-2.0" ]
364
6d98508f4c85b9dd3bf022924b0ecc5354ec8281
https://github.com/manjrekarom/PaddleOCR2Pytorch/tree/6d98508f4c85b9dd3bf022924b0ecc5354ec8281
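For the SEModule entry above, the interesting fusion is the epilogue: triton_poi_fused_add_convolution_div_hardtanh_mul_2 folds the conv2 bias add, the Hsigmoid relu6(1.2*y + 3)/6 (with 1/6 appearing as the 0.16666666666666666 constant), and the broadcasted rescale of the block input into one pass. A minimal sketch of that math, written for illustration with hypothetical random tensors:

import torch

def hsigmoid(y):
    # relu6(1.2*y + 3) / 6; 1/6 shows up as 0.16666666666666666 in the kernel
    return torch.clamp(1.2 * y + 3.0, 0.0, 6.0) / 6.0

x = torch.rand(4, 4, 4, 4)   # SE block input (primals_1)
y = torch.rand(4, 4, 1, 1)   # conv2 output incl. bias (buf4 + primals_5)
out = x * hsigmoid(y)        # broadcasts over H, W; what buf5 holds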
GEGLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/g3/cg3jxrez5zd4oyrbdxcja37kso4ls6az3z6q5iucel6jjkrpi3th.py # Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul] # Source node to ATen node mapping: # gelu => add, erf, mul, mul_1, mul_2 # mul => mul_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %getitem), kwargs = {}) triton_poi_fused_gelu_mul_0 = async_compile.triton('triton_poi_fused_gelu_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask) tmp9 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_gelu_mul_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp9 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GEGLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AlansBoyHeart/vit-pytorch
GEGLU
false
1,910
[ "MIT" ]
0
1959adae0bdd7801475bba34d7d61bdc529b4616
https://github.com/AlansBoyHeart/vit-pytorch/tree/1959adae0bdd7801475bba34d7d61bdc529b4616
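A note on the GEGLU entry above, with an illustrative reference that is not from the repo: the kernel loads the second half of the last dimension (offset 2 + x0) as the GELU operand and the first half as the gate, i.e. gelu(b) * a for a, b = x.chunk(2, dim=-1), using the exact erf-based GELU:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
a, b = x.chunk(2, dim=-1)   # a: gate input (first half), b: GELU operand (second half)
out = F.gelu(b) * a         # erf-based GELU: 0.5*b*(1 + erf(b/sqrt(2)))

The output shape (4, 4, 4, 2) matches buf0 in the call function.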
Accuracy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zj/czjdjyrflbcisam4biblebtuqyafkq32kfbtpbzsspl6b4xtphh2.py # Topologically Sorted Source Nodes: [softmax, prediction], Original ATen: [aten._softmax, aten.argmax] # Source node to ATen node mapping: # prediction => argmax # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%div, -2), kwargs = {}) triton_poi_fused__softmax_argmax_1 = async_compile.triton('triton_poi_fused__softmax_argmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_argmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_argmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (16*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (16*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (16*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp10 = tl.load(in_ptr0 + (4 + (16*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (5 + (16*x1)), xmask, 
eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (6 + (16*x1)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (7 + (16*x1)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp34 = tl.load(in_ptr0 + (8 + (16*x1)), xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr0 + (9 + (16*x1)), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr0 + (10 + (16*x1)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr0 + (11 + (16*x1)), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp57 = tl.load(in_ptr0 + (12 + (16*x1)), xmask, eviction_policy='evict_last') tmp58 = tl.load(in_ptr0 + (13 + (16*x1)), xmask, eviction_policy='evict_last') tmp60 = tl.load(in_ptr0 + (14 + (16*x1)), xmask, eviction_policy='evict_last') tmp62 = tl.load(in_ptr0 + (15 + (16*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tmp9 / tmp16 tmp18 = tmp8 > tmp17 tmp19 = tmp8 == tmp17 tmp20 = tmp8 != tmp8 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 0, tl.int64) tmp27 = tl.full([1], 1, tl.int64) tmp28 = tmp26 < tmp27 tmp29 = tmp25 & tmp28 tmp30 = tmp23 | tmp29 tmp31 = tl.where(tmp30, tmp8, tmp17) tmp32 = tl.where(tmp30, tmp26, tmp27) tmp36 = tmp34 + tmp35 tmp38 = tmp36 + tmp37 tmp40 = tmp38 + tmp39 tmp41 = tmp33 / tmp40 tmp42 = tmp31 > tmp41 tmp43 = tmp31 == tmp41 tmp44 = tmp31 != tmp31 tmp45 = tmp41 != tmp41 tmp46 = tmp44 > tmp45 tmp47 = tmp42 | tmp46 tmp48 = tmp44 & tmp45 tmp49 = tmp43 | tmp48 tmp50 = tl.full([1], 2, tl.int64) tmp51 = tmp32 < tmp50 tmp52 = tmp49 & tmp51 tmp53 = tmp47 | tmp52 tmp54 = tl.where(tmp53, tmp31, tmp41) tmp55 = tl.where(tmp53, tmp32, tmp50) tmp59 = tmp57 + tmp58 tmp61 = tmp59 + tmp60 tmp63 = tmp61 + tmp62 tmp64 = tmp56 / tmp63 tmp65 = tmp54 > tmp64 tmp66 = tmp54 == tmp64 tmp67 = tmp54 != tmp54 tmp68 = tmp64 != tmp64 tmp69 = tmp67 > tmp68 tmp70 = tmp65 | tmp69 tmp71 = tmp67 & tmp68 tmp72 = tmp66 | tmp71 tmp73 = tl.full([1], 3, tl.int64) tmp74 = tmp55 < tmp73 tmp75 = tmp72 & tmp74 tmp76 = tmp70 | tmp75 tmp77 = tl.where(tmp76, tmp54, tmp64) tmp78 = tl.where(tmp76, tmp55, tmp73) tl.store(out_ptr0 + (x2), tmp78, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/dp/cdpemgebi7is3mfuap3dlfrpijxckzdvrrnaly6xxb4yenoeidgc.py # Topologically Sorted Source Nodes: [scores, sum_1, truediv], Original ATen: [aten.eq, aten.sum, aten.div] # Source node to ATen node mapping: # scores => eq # sum_1 => sum_2 # truediv => div_1 # Graph fragment: # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%argmax, %arg1_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%eq,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 256.0), kwargs = {}) triton_per_fused_div_eq_sum_2 = async_compile.triton('triton_per_fused_div_eq_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_eq_sum_2', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_eq_sum_2(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (r2), None) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tmp7.to(tl.float32) tmp9 = 0.00390625 tmp10 = tmp8 * tmp9 tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: [softmax, prediction], Original ATen: [aten._softmax, aten.argmax] triton_poi_fused__softmax_argmax_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0) del buf0 buf3 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [scores, sum_1, truediv], Original ATen: [aten.eq, aten.sum, aten.div] triton_per_fused_div_eq_sum_2.run(buf1, arg1_1, buf3, 1, 256, grid=grid(1), stream=stream0) del arg1_1 del buf1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_argmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp4 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp6 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp10 = tl.load(in_ptr0 + (4 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (5 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (6 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (7 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp34 = tl.load(in_ptr0 + (8 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp35 = tl.load(in_ptr0 + (9 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp37 = tl.load(in_ptr0 + (10 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr0 + (11 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp56 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp57 = tl.load(in_ptr0 + (12 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp58 = tl.load(in_ptr0 + (13 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp60 = tl.load(in_ptr0 + (14 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp62 = tl.load(in_ptr0 + (15 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tmp9 / tmp16 tmp18 = tmp8 > tmp17 tmp19 = tmp8 == tmp17 tmp20 = tmp8 != tmp8 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 0, tl.int64) tmp27 = tl.full([1], 1, tl.int64) tmp28 = tmp26 < tmp27 tmp29 = tmp25 & tmp28 tmp30 = tmp23 | tmp29 tmp31 = tl.where(tmp30, tmp8, tmp17) tmp32 = tl.where(tmp30, tmp26, tmp27) tmp36 = tmp34 + tmp35 tmp38 = tmp36 + tmp37 tmp40 
= tmp38 + tmp39 tmp41 = tmp33 / tmp40 tmp42 = tmp31 > tmp41 tmp43 = tmp31 == tmp41 tmp44 = tmp31 != tmp31 tmp45 = tmp41 != tmp41 tmp46 = tmp44 > tmp45 tmp47 = tmp42 | tmp46 tmp48 = tmp44 & tmp45 tmp49 = tmp43 | tmp48 tmp50 = tl.full([1], 2, tl.int64) tmp51 = tmp32 < tmp50 tmp52 = tmp49 & tmp51 tmp53 = tmp47 | tmp52 tmp54 = tl.where(tmp53, tmp31, tmp41) tmp55 = tl.where(tmp53, tmp32, tmp50) tmp59 = tmp57 + tmp58 tmp61 = tmp59 + tmp60 tmp63 = tmp61 + tmp62 tmp64 = tmp56 / tmp63 tmp65 = tmp54 > tmp64 tmp66 = tmp54 == tmp64 tmp67 = tmp54 != tmp54 tmp68 = tmp64 != tmp64 tmp69 = tmp67 > tmp68 tmp70 = tmp65 | tmp69 tmp71 = tmp67 & tmp68 tmp72 = tmp66 | tmp71 tmp73 = tl.full([1], 3, tl.int64) tmp74 = tmp55 < tmp73 tmp75 = tmp72 & tmp74 tmp76 = tmp70 | tmp75 tl.where(tmp76, tmp54, tmp64) tmp78 = tl.where(tmp76, tmp55, tmp73) tl.store(out_ptr0 + x2, tmp78, xmask) @triton.jit def triton_per_fused_div_eq_sum_2(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + r2, None) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tmp7.to(tl.float32) tmp9 = 0.00390625 tmp10 = tmp8 * tmp9 tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) triton_poi_fused__softmax_argmax_1[grid(64)](buf0, buf1, 64, XBLOCK =64, num_warps=1, num_stages=1) del buf0 buf3 = empty_strided_cuda((), (), torch.float32) triton_per_fused_div_eq_sum_2[grid(1)](buf1, arg1_1, buf3, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf1 return buf3, class AccuracyNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
BShennette/Pno-ai
Accuracy
false
11,228
[ "MIT" ]
0
486434bfb40887d06e3d12a66831b9e0e7d020c2
https://github.com/BShennette/Pno-ai/tree/486434bfb40887d06e3d12a66831b9e0e7d020c2
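Reconstructing the Accuracy entry above as plain PyTorch (a reading of the ATen graph in the record, not repo code): softmax over the last dim, argmax over dim=-2, elementwise equality against the float targets, and a mean via the 0.00390625 = 1/256 constant. The (4, 4, 4) prediction broadcasts against the (4, 4, 4, 4) target, which is why the reduction kernel indexes predictions with r0 = rindex % 64.

import torch

logits = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
pred = logits.softmax(dim=-1).argmax(dim=-2)            # (4, 4, 4), int64
acc = (pred.float() == target).float().sum() / 256.0    # pred broadcasts to (4,4,4,4)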
BasicBlock1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/yc/cyc22ofksedv27pfbzelnb4w34yylhyqovnpjyfto4qrzov2wriv.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %relu], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 
+ (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + ((-4) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/as/casawnq4v3qkxvanufkeovwbmhrlvnzi55imfmscbraafjzufzyj.py # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out => convolution # out_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args 
args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_3, buf0, primals_2, buf1, 512, grid=grid(512), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_1.run(buf0, primals_2, buf2, 256, grid=grid(256), stream=stream0) del buf0 del primals_2 return (buf1, primals_1, primals_3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_3, buf0, primals_2, buf1, 512, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(256)](buf0, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return buf1, primals_1, primals_3, buf2 class BasicBlock1New(nn.Module): def __init__(self, input_dim, output_dim): super(BasicBlock1New, self).__init__() self.ID = input_dim self.conv = nn.Conv2d(in_channels=input_dim, out_channels= output_dim, kernel_size=1, padding=0, stride=1) self.relu = nn.ReLU() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
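# A minimal eager-mode sketch of what the fused kernels above compute,
# inferred from the kernel bodies: triton_poi_fused_cat_0 writes the raw input
# into the first four output channels and relu(conv(x) + bias) into the last
# four, i.e. a DenseNet-style channel concatenation. The class name
# BasicBlock1Sketch is hypothetical; this row's module source begins before
# this section, so treat the sketch as a reconstruction.
import torch
import torch.nn as nn


class BasicBlock1Sketch(nn.Module):

    def __init__(self, input_dim=4, output_dim=4):
        super().__init__()
        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size=1)

    def forward(self, x):
        # out[:, :C] = x and out[:, C:] = relu(conv(x)), matching the cat kernel
        return torch.cat([x, torch.relu(self.conv(x))], dim=1)


x = torch.rand(4, 4, 4, 4)
print(BasicBlock1Sketch()(x).shape)  # torch.Size([4, 8, 4, 4]), channels doubled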
Houseqin/PytorchToCaffe
BasicBlock1
false
9,112
[ "MIT" ]
0
e94224ba6414e76369f191e7e3d9731c12ce2bd7
https://github.com/Houseqin/PytorchToCaffe/tree/e94224ba6414e76369f191e7e3d9731c12ce2bd7
TargetContextGate
import torch import torch.nn as nn import torch.cuda class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class TargetContextGate(nn.Module): """Apply the context gate only to the target context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(TargetContextGate, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, prev_emb, dec_state, attn_state): z, source, target = self.context_gate(prev_emb, dec_state, attn_state) return self.tanh(z * target + source) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embeddings_size': 4, 'decoder_size': 4, 'attention_size': 4, 'output_size': 4}]
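# A minimal sanity sketch for the module above: the gate z is a sigmoid over
# the concatenated (prev_emb, dec_state, attn_state), and only the target
# projection is modulated by z before the tanh, so
# out = tanh(z * target_proj([prev_emb; dec_state]) + source_proj(attn_state)).
# Shapes follow get_inputs()/get_init_inputs().
import torch

gate = TargetContextGate(embeddings_size=4, decoder_size=4, attention_size=4,
    output_size=4)
prev_emb, dec_state, attn_state = (torch.rand(4, 4) for _ in range(3))
out = gate(prev_emb, dec_state, attn_state)
print(out.shape)  # torch.Size([4, 4]), values squashed into (-1, 1) by tanh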
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_out_ptr0 + x2, xmask) tmp5 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp6 = tmp4 + tmp5 tmp7 = tmp3 + tmp6 tmp8 = libdevice.tanh(tmp7) tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_1, primals_2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 buf5 = buf2 del buf2 triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf5, buf1, buf4, primals_7, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 return buf5, primals_3, buf0, buf1, buf3, buf4, buf5 class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class TargetContextGateNew(nn.Module): """Apply the context gate only to the target context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(TargetContextGateNew, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, input_0, input_1, input_2): primals_4 = self.context_gate.gate.weight primals_5 = self.context_gate.gate.bias primals_1 = self.context_gate.source_proj.weight primals_7 = self.context_gate.source_proj.bias primals_8 = self.context_gate.target_proj.weight primals_9 = self.context_gate.target_proj.bias primals_2 = input_0 primals_3 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
AndrewM1998/MultimodalNMT
TargetContextGate
false
7,710
[ "MIT" ]
40
b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
https://github.com/AndrewM1998/MultimodalNMT/tree/b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
TransformerEncoderLayer
import torch from torch import Tensor from typing import Optional from torch import nn def _get_activation_fn(activation: 'str'): if activation == 'relu': return nn.functional.relu elif activation == 'gelu': return nn.functional.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class TransformerEncoderLayer(nn.Module): """ Modified from torch.nn.TransformerEncoderLayer. Add support of normalize_before, i.e., use layer_norm before the first block. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of intermediate layer, relu or gelu (default=relu). normalize_before: whether to use layer_norm before the first block. Examples:: >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) >>> src = torch.rand(10, 32, 512) >>> out = encoder_layer(src) """ def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int' =2048, dropout: 'float'=0.1, activation: 'str'='relu', normalize_before: 'bool'=True) ->None: super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def __setstate__(self, state): if 'activation' not in state: state['activation'] = nn.functional.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src: 'Tensor', src_mask: 'Optional[Tensor]'=None, src_key_padding_mask: 'Optional[Tensor]'=None) ->Tensor: """ Pass the input through the encoder layer. Args: src: the sequence to the encoder layer (required). src_mask: the mask for the src sequence (optional). src_key_padding_mask: the mask for the src keys per batch (optional). Shape: src: (S, N, E). src_mask: (S, S). src_key_padding_mask: (N, S). S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number """ residual = src if self.normalize_before: src = self.norm1(src) src2 = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] src = residual + self.dropout1(src2) if not self.normalize_before: src = self.norm1(src) residual = src if self.normalize_before: src = self.norm2(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = residual + self.dropout2(src2) if not self.normalize_before: src = self.norm2(src) return src def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
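# The normalize_before flag above selects between pre-LN and post-LN residual
# orderings. With dropout and masks stripped away, the two data flows reduce to:
#
#   pre-LN  (normalize_before=True):   x = x + attn(norm1(x)); x = x + ffn(norm2(x))
#   post-LN (normalize_before=False):  x = norm1(x + attn(x)); x = norm2(x + ffn(x))
#
# Minimal smoke test, assuming the unbatched (S, E) input from get_inputs():
import torch

layer = TransformerEncoderLayer(d_model=4, nhead=4)
src = torch.rand(4, 4)
print(layer(src).shape)  # torch.Size([4, 4])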
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def 
triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (2048, 4), (4, 1)) assert_size_stride(primals_11, (2048,), (1,)) assert_size_stride(primals_12, (4, 2048), (2048, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha= 1, beta=1, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha= 1, beta=1, out=buf5) buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0) del buf3 triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf8 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, 
XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_7 buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12, buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12, buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf13 del buf14 del primals_9 buf16 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 2048), (1, 4), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_relu_8[grid(8192)](buf17, primals_11, 8192, XBLOCK =128, num_warps=4, num_stages=1) del primals_11 buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (2048, 4), (1, 2048), 0), out=buf18) buf19 = buf18 del buf18 triton_poi_fused_add_9[grid(16)](buf19, primals_1, buf12, primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_13 return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor( buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12, primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4 ), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0)) def _get_activation_fn(activation: 'str'): if activation == 'relu': return nn.functional.relu elif activation == 'gelu': return nn.functional.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class TransformerEncoderLayerNew(nn.Module): """ Modified from torch.nn.TransformerEncoderLayer. Add support of normalize_before, i.e., use layer_norm before the first block. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of intermediate layer, relu or gelu (default=relu). normalize_before: whether to use layer_norm before the first block. 
Examples:: >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) >>> src = torch.rand(10, 32, 512) >>> out = encoder_layer(src) """ def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int' =2048, dropout: 'float'=0.1, activation: 'str'='relu', normalize_before: 'bool'=True) ->None: super(TransformerEncoderLayerNew, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def __setstate__(self, state): if 'activation' not in state: state['activation'] = nn.functional.relu super(TransformerEncoderLayerNew, self).__setstate__(state) def forward(self, input_0): primals_4 = self.self_attn.in_proj_weight primals_5 = self.self_attn.in_proj_bias primals_1 = self.self_attn.out_proj.weight primals_2 = self.self_attn.out_proj.bias primals_10 = self.linear1.weight primals_11 = self.linear1.bias primals_12 = self.linear2.weight primals_3 = self.linear2.bias primals_7 = self.norm1.weight primals_8 = self.norm1.bias primals_9 = self.norm2.weight primals_13 = self.norm2.bias primals_6 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
johnjosephmorgan/snowfall
TransformerEncoderLayer
false
12,761
[ "Apache-2.0" ]
0
604d789c0aed035626d6745e6d7a427168063cae
https://github.com/johnjosephmorgan/snowfall/tree/604d789c0aed035626d6745e6d7a427168063cae
RMSE_log
import torch
import torch.nn as nn
import torch.nn.functional as F


class RMSE_log(nn.Module):

    def __init__(self):
        super(RMSE_log, self).__init__()

    def forward(self, fake, real):
        if fake.shape != real.shape:
            _, _, H, W = real.shape
            # F.upsample is deprecated; F.interpolate with an explicit
            # align_corners is the drop-in replacement.
            fake = F.interpolate(fake, size=(H, W), mode='bilinear',
                align_corners=False)
        loss = torch.sqrt(torch.mean(torch.abs(torch.log(real) - torch.log(
            fake)) ** 2))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
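# The loss above is the log-space RMSE, sqrt(mean((log(real) - log(fake))**2));
# the abs() is redundant for real-valued inputs since the difference is squared.
# Tiny worked check against a hand-rolled version (inputs shifted away from 0
# so log() stays finite -- an assumption on our part, since torch.rand can
# return values arbitrarily close to zero):
import torch

fake = torch.rand(4, 4, 4, 4) + 0.1
real = torch.rand(4, 4, 4, 4) + 0.1
manual = ((real.log() - fake.log()) ** 2).mean().sqrt()
assert torch.allclose(RMSE_log()(fake, real), manual)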
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_log_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = libdevice.sqrt(tmp11) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_log_mean_pow_sqrt_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class RMSE_logNew(nn.Module): def __init__(self): super(RMSE_logNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
d4l3k/crowds
RMSE_log
false
12,237
[ "MIT" ]
0
a57eee80d66498474c86cec22dd77be9d627ad97
https://github.com/d4l3k/crowds/tree/a57eee80d66498474c86cec22dd77be9d627ad97
SimpleTanhModel
import torch import torch.jit import torch.onnx import torch.nn class SimpleTanhModel(torch.nn.Module): def __init__(self, inplace=False): super(SimpleTanhModel, self).__init__() self.inplace = inplace def forward(self, tensor): tensor = tensor + tensor return tensor.tanh_() if self.inplace else tensor.tanh() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
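# Quick check that the two code paths above agree: both compute tanh(x + x),
# and the inplace variant mutates only the intermediate sum, never the input.
import torch

x = torch.rand(4, 4, 4, 4)
out = SimpleTanhModel()(x)
out_inplace = SimpleTanhModel(inplace=True)(x)
assert torch.equal(out, out_inplace)
assert torch.allclose(out, torch.tanh(2 * x))  # x + x == 2 * x elementwise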
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = libdevice.tanh(tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleTanhModelNew(torch.nn.Module): def __init__(self, inplace=False): super(SimpleTanhModelNew, self).__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
andreas-hommel/glow
SimpleTanhModel
false
3,357
[ "Apache-2.0" ]
0
2bbbf8188a2a941e85677c83f2146bbd076a262e
https://github.com/andreas-hommel/glow/tree/2bbbf8188a2a941e85677c83f2146bbd076a262e
VAE
import torch from torch import nn import torch.utils.data from torch.nn import functional as F import torch.cuda class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.fc1 = nn.Linear(784, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, 784) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return mu + eps * std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, x): mu, logvar = self.encode(x.view(-1, 784)) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
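# reparameterize() above is the standard reparameterization trick:
# z = mu + eps * sigma with sigma = exp(0.5 * logvar), so gradients flow
# through mu and logvar while the randomness stays in eps ~ N(0, I).
# Smoke test with the shape from get_inputs():
import torch

vae = VAE()
recon, mu, logvar = vae(torch.rand(4, 784))
print(recon.shape, mu.shape, logvar.shape)
# torch.Size([4, 784]) torch.Size([4, 20]) torch.Size([4, 20])
assert 0.0 <= recon.min() and recon.max() <= 1.0  # decoder ends in sigmoid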
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data from torch.nn import functional as F import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 * tmp5 tmp7 = tmp0 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 784 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (400, 784), (784, 1)) assert_size_stride(primals_3, (400,), (1,)) assert_size_stride(primals_4, (20, 400), (400, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (20, 400), (400, 1)) assert_size_stride(primals_7, (20,), (1,)) assert_size_stride(primals_8, (400, 20), (20, 1)) assert_size_stride(primals_9, (400,), (1,)) assert_size_stride(primals_10, (784, 400), (400, 1)) assert_size_stride(primals_11, (784,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = 
torch.ops.aten.randn.default([4, 20], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 20), (20, 1), torch.float32) triton_poi_fused_add_exp_mul_1[grid(80)](buf2, buf5, buf3, buf6, 80, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (20, 400), (1, 20), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_relu_0[grid(1600)](buf8, primals_9, 1600, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (400, 784), (1, 400), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_sigmoid_2[grid(3136)](buf10, primals_11, 3136, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 return (buf10, buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8, buf10, primals_10, primals_8, primals_6, primals_4) class VAENew(nn.Module): def __init__(self): super(VAENew, self).__init__() self.fc1 = nn.Linear(784, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, 784) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return mu + eps * std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc21.weight primals_5 = self.fc21.bias primals_6 = self.fc22.weight primals_7 = self.fc22.bias primals_8 = self.fc3.weight primals_9 = self.fc3.bias primals_10 = self.fc4.weight primals_11 = self.fc4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1], output[2]
Code-Cornelius/libraries
VAE
false
5,032
[ "MIT" ]
1
2ebd5f78dcedfdce1416280d7d40de7691906951
https://github.com/Code-Cornelius/libraries/tree/2ebd5f78dcedfdce1416280d7d40de7691906951
Network
import torch import torch.nn as nn import torch.nn.functional as F class Network(nn.Module): def __init__(self, input_size, nb_action): super(Network, self).__init__() self.input_size = input_size self.nb_action = nb_action self.fc1 = nn.Linear(input_size, 30) self.fc2 = nn.Linear(30, nb_action) def forward(self, state): x = F.relu(self.fc1(state)) q_values = self.fc2(x) return q_values def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'nb_action': 4}]
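# Usage sketch for the Q-network above: states in, one Q-value per action out.
# Extra leading dimensions pass through untouched, since nn.Linear acts only
# on the last dimension.
import torch

net = Network(input_size=4, nb_action=4)
q_values = net(torch.rand(4, 4, 4, 4))
print(q_values.shape)  # torch.Size([4, 4, 4, 4])
greedy_action = q_values.argmax(dim=-1)  # index of the highest-valued action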
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 30 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (30, 4), (4, 1)) assert_size_stride(primals_2, (30,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 30), (30, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 30), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1920)](buf1, primals_2, buf3, 1920, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 30), (30, 1), 0), reinterpret_tensor(primals_4, (30, 4), (1, 30), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 30), (30, 1), 0), primals_4, buf3 class NetworkNew(nn.Module): def __init__(self, input_size, nb_action): super(NetworkNew, self).__init__() self.input_size = input_size self.nb_action = nb_action self.fc1 = nn.Linear(input_size, 30) self.fc2 = nn.Linear(30, nb_action) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ExileExodus/Deep-Reinforcement-Learning
Network
false
9,014
[ "MIT" ]
0
0007e5c4b74e920c250a15c18762966e1b55c17d
https://github.com/ExileExodus/Deep-Reinforcement-Learning/tree/0007e5c4b74e920c250a15c18762966e1b55c17d
BatchDHCN
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
import torch.utils.data
import torch.optim


class BatchDHCN(nn.Module):
    """Zero-padded convolution block: pads the top and right edges by
    conv_size - 1, applies a conv_size x conv_size Conv2d, then ReLU.
    (self.dropout and batch_norm are defined but unused in forward.)"""

    def __init__(self, embed_size=512, output_size=512, num_channel=2,
        conv_size=3, batch_norm=True):
        super(BatchDHCN, self).__init__()
        self.batch_norm = batch_norm
        self.embed_size = embed_size
        self.output_size = output_size
        self.num_channel = num_channel
        self.padding = nn.ZeroPad2d((0, conv_size - 1, conv_size - 1, 0))
        self.conv_1 = nn.Conv2d(self.num_channel, self.output_size, (
            conv_size, conv_size))
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        x_conv_1 = self.conv_1(self.padding(x))
        x_conv_1 = F.relu(x_conv_1)
        return x_conv_1


def get_inputs():
    return [torch.rand([4, 2, 4, 4])]


def get_init_inputs():
    return [[], {}]
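# Shape check for the asymmetric padding above: ZeroPad2d((0, k-1, k-1, 0))
# pads only the right and top by k - 1, so the following valid k x k
# convolution returns the original H x W (a causal-style pad rather than a
# centered one).
import torch

block = BatchDHCN()  # num_channel=2, output_size=512, conv_size=3
x = torch.rand(4, 2, 4, 4)
print(block(x).shape)  # torch.Size([4, 512, 4, 4]) -- H and W preserved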
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils import torch.utils.data import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 2 y1 = yindex // 2 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 2 * x2 + 18 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 8 xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex // 6 x2 = xindex % 6 y4 = yindex x5 = xindex y0 = yindex % 2 y1 = yindex // 2 tmp0 = -2 + x3 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = x2 tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (-8 + x2 + 4 * x3 + 16 * y4), tmp6 & xmask & ymask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (y0 + 2 * x5 + 72 * y1), tmp7, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 8192 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 8192 * y1), tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(primals_2, (512, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_3, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((512, 2, 3, 3), (18, 1, 6, 2), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(1024, 9)](primals_2, buf0, 1024, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 2, 6, 6), (72, 1, 12, 2), torch.float32) triton_poi_fused_constant_pad_nd_1[grid(8, 36)](primals_1, buf1, 8, 36, XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. float32) buf4 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(2048, 16)]( buf2, primals_3, buf3, buf4, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del buf2 del primals_3 return buf3, buf0, buf1, buf4 class BatchDHCNNew(nn.Module): """docstring for BatchDHCN""" def __init__(self, embed_size=512, output_size=512, num_channel=2, conv_size=3, batch_norm=True): super(BatchDHCNNew, self).__init__() self.batch_norm = batch_norm self.embed_size = embed_size self.output_size = output_size self.num_channel = num_channel self.padding = nn.ZeroPad2d((0, conv_size - 1, conv_size - 1, 0)) self.conv_1 = nn.Conv2d(self.num_channel, self.output_size, ( conv_size, conv_size)) self.dropout = nn.Dropout(p=0.5) def forward(self, input_0): primals_2 = self.conv_1.weight primals_3 = self.conv_1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
deeplearning2020/self
BatchDHCN
false
1,830
[ "MIT" ]
0
cf0e6f9acdcfe17906c6327042d25ac9c8894885
https://github.com/deeplearning2020/self/tree/cf0e6f9acdcfe17906c6327042d25ac9c8894885
_MultipleInputNetwork
import torch import torch.nn as _nn class _MultipleInputNetwork(_nn.Module): def __init__(self): super(_MultipleInputNetwork, self).__init__() self.conv = _nn.Conv2d(3, 16, 3) def forward(self, inp1, inp2): inp = inp1 * inp2 out = self.conv(inp) return out def get_inputs(): return [torch.rand([4, 3, 64, 64]), torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
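# The two inputs above are combined elementwise before a single valid 3 x 3
# convolution, so the 64 x 64 feature maps shrink to 62 x 62:
import torch

net = _MultipleInputNetwork()
out = net(torch.rand(4, 3, 64, 64), torch.rand(4, 3, 64, 64))
print(out.shape)  # torch.Size([4, 16, 62, 62])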
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as _nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_ptr1 + x0, None) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 246016 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (16, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_4, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(49152)](primals_1, primals_2, buf0, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 16, 62, 62), (61504, 3844, 62, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(246016)](buf2, primals_4, 246016, XBLOCK=1024, num_warps=4, num_stages=1) del primals_4 return buf2, primals_3, buf0 class _MultipleInputNetworkNew(_nn.Module): def __init__(self): super(_MultipleInputNetworkNew, self).__init__() self.conv = _nn.Conv2d(3, 16, 3) def forward(self, input_0, input_1): primals_3 = self.conv.weight primals_4 = self.conv.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Yifanfanfanfan/torchutils
_MultipleInputNetwork
false
18,129
[ "MIT" ]
9
939331d28fcee97bfb0a4b2eaab8e799877fb0dc
https://github.com/Yifanfanfanfan/torchutils/tree/939331d28fcee97bfb0a4b2eaab8e799877fb0dc
UpsampleConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/hg/chgxzii65iyfdwuwk6btc2ievya3atcd4si6chvog45v2d5sekhj.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.pixel_shuffle] # Source node to ATen node mapping: # output_1 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_pixel_shuffle_0 = async_compile.triton('triton_poi_fused_pixel_shuffle_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pixel_shuffle_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pixel_shuffle_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 4 x2 = (xindex // 8) % 2 x3 = (xindex // 16) % 4 x5 = (xindex // 256) x6 = xindex tmp0 = tl.load(in_ptr0 + (x1 + (4*x3) + 
(16*x0) + (32*x2) + (64*x5)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x6), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/mt/cmt4roffhwfg6vw2odjfrgu4bjav3cztqx74kxjfq5igljucibfl.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_2, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.pixel_shuffle] stream0 = get_raw_stream(0) triton_poi_fused_pixel_shuffle_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0), primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1; del buf1 # reuse # 
Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 1024, grid=grid(1024), stream=stream0) del primals_3 return (buf2, primals_2, reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pixel_shuffle_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 4 x2 = xindex // 8 % 2 x3 = xindex // 16 % 4 x5 = xindex // 256 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3 + 16 * x0 + 32 * x2 + 64 * x5), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x6, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1 ), torch.float32) get_raw_stream(0) triton_poi_fused_pixel_shuffle_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0), primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0) def spectral_norm(layer, n_iters=1): return torch.nn.utils.spectral_norm(layer, n_power_iterations=n_iters) class UpsampleConvNew(nn.Module): def __init__(self, input_dim, output_dim, kernel_size=3, biases=True, spec_norm=False): super().__init__() self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases) if spec_norm: self.conv = spectral_norm(self.conv) self.pixelshuffle = nn.PixelShuffle(upscale_factor=2) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
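A minimal smoke test for the compiled module above (a sketch, not part of the dataset): it assumes a CUDA device and the exact float32 (4, 4, 4, 4) input whose shape and strides the asserts in call() bake in; input_dim=4 and output_dim=4 likewise mirror the traced weight shapes.

import torch

model = UpsampleConvNew(input_dim=4, output_dim=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
# The fused kernel performs the channel-replicating pixel shuffle, and the
# 3x3 conv with padding 1 preserves the upsampled spatial size.
y = model(x)
print(y.shape)  # torch.Size([4, 4, 8, 8]): H and W doubled by the shuffle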
Sriram-Ravula/Inverse_Meta
UpsampleConv
false
2,851
[ "MIT" ]
0
c6c1e4ae0d670093156249c60d74373b22d61f01
https://github.com/Sriram-Ravula/Inverse_Meta/tree/c6c1e4ae0d670093156249c60d74373b22d61f01
CoralLayer
import torch class CoralLayer(torch.nn.Module): """ Implements CORAL layer described in Cao, Mirjalili, and Raschka (2020) *Rank Consistent Ordinal Regression for Neural Networks with Application to Age Estimation* Pattern Recognition Letters, https://doi.org/10.1016/j.patrec.2020.11.008 Parameters ----------- size_in : int Number of input features for the inputs to the forward method, which are expected to have shape=(num_examples, num_features). num_classes : int Number of classes in the dataset. """ def __init__(self, size_in, num_classes): super().__init__() self.size_in, self.size_out = size_in, 1 self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False) self.coral_bias = torch.nn.Parameter(torch.zeros(num_classes - 1). float()) def forward(self, x): """ Computes forward pass. Parameters ----------- x : torch.tensor, shape=(num_examples, num_features) Input features. Returns ----------- logits : torch.tensor, shape=(num_examples, num_classes-1) """ return self.coral_weights(x) + self.coral_bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'size_in': 4, 'num_classes': 4}]
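The docstring stops at the logits, so here is a short sketch of the decoding step described in the cited CORAL paper (an illustration, not code from this repo): each of the num_classes - 1 logits is a binary "rank exceeds threshold k" decision, and summing the thresholded sigmoids recovers an ordinal label.

import torch

layer = CoralLayer(size_in=4, num_classes=4)
logits = layer(torch.rand(8, 4))  # shape (8, 3): one logit per rank threshold
# Hypothetical decoding from the paper: the 0.5 cutoff on each sigmoid
# yields binary rank decisions whose sum is a label in [0, num_classes - 1].
labels = (torch.sigmoid(logits) > 0.5).sum(dim=1)
print(labels.shape)  # torch.Size([8])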
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 x0 = xindex % 3 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(192)](buf0, primals_3, buf1, 192, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0) class CoralLayerNew(torch.nn.Module): """ Implements CORAL layer described in Cao, Mirjalili, and Raschka (2020) *Rank Consistent Ordinal Regression for Neural Networks with Application to Age Estimation* Pattern Recognition Letters, https://doi.org/10.1016/j.patrec.2020.11.008 Parameters ----------- size_in : int Number of input features for the inputs to the forward method, which are expected to have shape=(num_examples, num_features). num_classes : int Number of classes in the dataset. """ def __init__(self, size_in, num_classes): super().__init__() self.size_in, self.size_out = size_in, 1 self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False) self.coral_bias = torch.nn.Parameter(torch.zeros(num_classes - 1). float()) def forward(self, input_0): primals_3 = self.coral_bias primals_1 = self.coral_weights.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
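A hedged equivalence check between the fused kernel and the eager computation (assumes CUDA and the fixed (4, 4, 4, 4) input that call() asserts; the tolerance is an assumption):

import torch

layer = CoralLayerNew(size_in=4, num_classes=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
fused = layer(x)  # (4, 4, 4, 3)
# Eager reference built from the same parameters as the compiled path.
ref = layer.coral_weights(x) + layer.coral_bias
print(torch.allclose(fused, ref, atol=1e-6))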
CHNxindong/corn-ordinal-neuralnet
CoralLayer
false
4,933
[ "MIT" ]
1
7f8a45614cb6488e9c019c5e9d3a5aee0d714e94
https://github.com/CHNxindong/corn-ordinal-neuralnet/tree/7f8a45614cb6488e9c019c5e9d3a5aee0d714e94
L2
import torch import torch.utils.data import torch import torch.nn as nn class L2(nn.Module): def __init__(self): super(L2, self).__init__() def forward(self, output, target): lossvalue = torch.norm(output - target, p=2, dim=1).mean() return lossvalue def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = 64.0 tmp24 = tmp22 / tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L2New(nn.Module): def __init__(self): super(L2New, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
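A sanity check of the single-program reduction above against the eager loss (assumes CUDA; the kernel hard-codes the 4x4x4x4 shape via RBLOCK = 64, so no other shape is valid):

import torch

loss_fn = L2New()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
fused = loss_fn(a, b)
eager = torch.norm(a - b, p=2, dim=1).mean()  # the original eager formula
print(torch.allclose(fused, eager, atol=1e-6))  # tolerance is an assumption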
WeisiX/ITAS3D
L2
false
18,111
[ "MIT" ]
4
fc861e0cb2d4516905bfadab5e5e880c2b021832
https://github.com/WeisiX/ITAS3D/tree/fc861e0cb2d4516905bfadab5e5e880c2b021832
Net
import torch from torch import Tensor from torch.functional import Tensor import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 60, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(60, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x: 'Tensor'): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
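A shape trace that explains the hard-coded 16 * 5 * 5 in fc1 (a sketch using the eager Net above, runnable on CPU): two 5x5 valid convolutions and two 2x2 max pools shrink 32x32 inputs to 5x5 feature maps.

import torch

net = Net()
x = torch.rand(4, 3, 32, 32)
a = net.pool(torch.relu(net.conv1(x)))  # (4, 60, 14, 14): (32 - 4) / 2
b = net.pool(torch.relu(net.conv2(a)))  # (4, 16, 5, 5): (14 - 4) / 2
print(b.flatten(1).shape)  # torch.Size([4, 400]), i.e. 16 * 5 * 5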
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 188160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 60 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 47040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 11760 x4 = xindex % 11760 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 11776 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 11776 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, 
tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (60, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (60,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 60, 5, 5), (1500, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 60, 28, 28), (47040, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(188160)](buf1, primals_2, 188160, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 60, 14, 14), (11776, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 60, 14, 14), (11776, 196, 14, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(47040)](buf1, buf2, buf3, 47040, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8 del buf8 
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 60, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(60, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
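A smoke test of the compiled network (assumes CUDA; the (4, 3, 32, 32) float32 input is the only shape the asserts in call() accept):

import torch

net = NetNew().cuda()
x = torch.rand(4, 3, 32, 32, device='cuda')
logits = net(x)  # runs the fused conv/relu/pool/linear pipeline above
print(logits.shape)  # torch.Size([4, 10])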
minister19/RL_pytorch_get_started
Net
false
4,014
[ "MIT" ]
0
e444f524a14d329f9a25c53f102bc96c4ea36ad8
https://github.com/minister19/RL_pytorch_get_started/tree/e444f524a14d329f9a25c53f102bc96c4ea36ad8
NormScaleFeature
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/z3/cz33rq7gb3mccgzwpt6uvrtpcvtka4pm6wvkq7bnb5vj2gtsqkxk.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, magnitudes, mul, output], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # magnitudes => add # mul => mul # output => div # pow_1 => pow_1 # sqrt => sqrt # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = tmp3 / tmp17 tl.store(out_ptr0 + (x3), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, magnitudes, mul, output], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = tmp3 / tmp17 tl.store(out_ptr0 + x3, tmp18, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class NormScaleFeatureNew(nn.Module): def __init__(self, init_value=1): super().__init__() self.scale = nn.Parameter(torch.FloatTensor([init_value])) def forward(self, input_0): primals_2 = self.scale primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
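A hedged comparison with the eager normalization the single kernel fuses, scale * x / (||x||_2 over dim 1 + 1e-6); assumes CUDA and the traced (4, 4, 4, 4) shape.

import torch

mod = NormScaleFeatureNew(init_value=1).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = mod(x)
# Eager reference; the 1e-06 offset mirrors the tmp16 constant in the kernel.
mags = torch.sqrt((x ** 2).sum(dim=1, keepdim=True)) + 1e-06
print(torch.allclose(out, mod.scale * x / mags, atol=1e-6))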
uncbiag/FeatureMapICON
NormScaleFeature
false
10,874
[ "Apache-2.0" ]
0
04160d0ce4e8f7615e1c59a1be5c6b8340b5b6e5
https://github.com/uncbiag/FeatureMapICON/tree/04160d0ce4e8f7615e1c59a1be5c6b8340b5b6e5
BertOutput
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/hk/chkxni526sa46elz3ztn5esaetlclniw4nbm3ra7wq5sova5mkd5.py # Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean] # Source node to ATen node mapping: # add => add # u => mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, 
eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3)) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/2a/c2atmp2erpyl7if7c6pxfntsmuvqm7hau7e33v4asqtjrqk6cihx.py # Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub] # Source node to ATen node mapping: # add => add # sub => sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) # %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {}) triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/gb/cgbyxsu2ykccfnkicedqiouveki57g42dz4fqztnk7wx3624yuma.py # Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # hidden_states_2 => add_2 # mul => mul # pow_1 => pow_1 # s => mean_1 # sqrt => sqrt # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %div), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {}) triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub] triton_poi_fused_add_sub_1.run(buf2, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0) del buf1 del primals_2 del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_5, buf2, primals_6, buf3, 256, grid=grid(256), stream=stream0) del primals_6 return (buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn import torch import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = 
libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_2 del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5, buf2, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2 class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertOutputNew(nn.Module): def __init__(self, config): super(BertOutputNew, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_5 = self.LayerNorm.weight primals_6 = self.LayerNorm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
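A minimal driver (assumes CUDA): BertOutputNew expects a config object, and the SimpleNamespace below is a mock whose values mirror what the trace baked in, hidden sizes of 4 and a layer-norm epsilon of 1.0 (the tmp15 = 1.0 constant in the third kernel); hidden_dropout_prob is an arbitrary placeholder, since the compiled graph carries no dropout op.

from types import SimpleNamespace
import torch

config = SimpleNamespace(intermediate_size=4, hidden_size=4,
                         layer_norm_eps=1.0, hidden_dropout_prob=0.1)
block = BertOutputNew(config).cuda()
hidden = torch.rand(4, 4, 4, 4, device='cuda')    # intermediate activations
residual = torch.rand(4, 4, 4, 4, device='cuda')  # the residual branch
print(block(hidden, residual).shape)  # torch.Size([4, 4, 4, 4])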
IsmaelElsharkawi/new_pororo_repo
BertOutput
false
8,811
[ "MIT" ]
19
4617083b420615b8a3eb0f44d02e4e91a8f407f7
https://github.com/IsmaelElsharkawi/new_pororo_repo/tree/4617083b420615b8a3eb0f44d02e4e91a8f407f7
TransposeMultiheadAttention
import torch import torch.nn as nn from typing import Optional import torch.utils.data import torch.nn class TransposeMultiheadAttention(nn.Module): """ Wrapper for nn.MultiheadAttention which first transposes the input tensor from (batch_size, seq_len, feature_dim) to (seq_length, batch_size, feature_dim), then applies the attention and transposes the attention outputs back to the input shape. """ def __init__(self, feature_dim: 'int', num_heads: 'int'=1): """ Args: feature_dim (int): attention embedding dimension num_heads (int): number of attention heads """ super().__init__() self._attention = nn.MultiheadAttention(embed_dim=feature_dim, num_heads=num_heads) self._attention_weights = None @property def attention_weights(self) ->Optional[torch.Tensor]: """ Contains attention weights from last forward call. """ return self._attention_weights def forward(self, x: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None ) ->torch.Tensor: """ Args: x (torch.Tensor): tensor of shape (batch_size, seq_len, feature_dim) mask (torch.Tensor): bool tensor with shape (batch_size, seq_len). Sequence elements that are False are invalid. Returns: Tensor with shape (batch_size, seq_len, feature_dim) """ assert x.dim( ) == 3, 'Requires x shape (batch_size x seq_len x feature_dim)' if mask is not None: mask[:, 0] = True mask = ~mask x = x.transpose(0, 1) attn_output, self._attention_weights = self._attention(x, x, x, key_padding_mask=mask) attn_output = attn_output.transpose(0, 1) return attn_output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'feature_dim': 4}]
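A sketch of the masking path in the eager module (shapes are illustrative, CPU is fine); the compiled version further down drops this path entirely, since its forward takes no mask argument. False marks invalid positions, and the module forces position 0 valid before inverting the mask into a key_padding_mask.

import torch

attn = TransposeMultiheadAttention(feature_dim=4, num_heads=1)
x = torch.rand(2, 5, 4)  # (batch_size, seq_len, feature_dim)
mask = torch.tensor([[True, True, True, False, False],
                     [True, True, False, False, False]])
y = attn(x, mask=mask)  # note: mask is modified in place
print(y.shape, attn.attention_weights.shape)  # (2, 5, 4) and (2, 5, 5)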
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from typing import Optional import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_mean_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = 1.0 tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12,), (1,)) 
assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(192)](buf1, primals_2, buf2, 192, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32) triton_poi_fused_mul_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 64), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_mean_4[grid(64)](buf5, buf6, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 4), (4, 16, 1), 128), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_5 return reinterpret_tensor(buf9, (4, 4, 4), (4, 16, 1), 0 ), buf10, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_4, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 128 ), reinterpret_tensor(buf3, (4, 4, 4), (4, 1, 16), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (4, 16, 1), 64) class TransposeMultiheadAttentionNew(nn.Module): """ Wrapper for nn.MultiheadAttention which first transposes the input tensor from (batch_size, seq_len, feature_dim) to (seq_length, batch_size, feature_dim), then applies the attention and transposes the attention outputs back to the input shape. """ def __init__(self, feature_dim: 'int', num_heads: 'int'=1): """ Args: feature_dim (int): attention embedding dimension num_heads (int): number of attention heads """ super().__init__() self._attention = nn.MultiheadAttention(embed_dim=feature_dim, num_heads=num_heads) self._attention_weights = None @property def attention_weights(self) ->Optional[torch.Tensor]: """ Contains attention weights from last forward call. """ return self._attention_weights def forward(self, input_0): primals_3 = self._attention.in_proj_weight primals_2 = self._attention.in_proj_bias primals_4 = self._attention.out_proj.weight primals_5 = self._attention.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
TheShadow29/pytorchvideo
TransposeMultiheadAttention
false
9,701
[ "Apache-2.0" ]
0
39a3e34e33fb0e1ec142288df08f6e8c3585961a
https://github.com/TheShadow29/pytorchvideo/tree/39a3e34e33fb0e1ec142288df08f6e8c3585961a
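A minimal equivalence sketch for this record (not part of the original repo): it assumes a CUDA device, the (4, 4, 4) sample shape from get_inputs(), and that both classes above are in scope. Since the lowered wrapper reads its weights from the same nn.MultiheadAttention submodule, sharing a state dict makes the two forward passes comparable.

import torch

# Hedged smoke test: eager module vs. the Inductor-lowered wrapper.
torch.manual_seed(0)
eager = TransposeMultiheadAttention(feature_dim=4).cuda()
lowered = TransposeMultiheadAttentionNew(feature_dim=4).cuda()
lowered.load_state_dict(eager.state_dict())  # identical parameter names

x = torch.rand(4, 4, 4, device='cuda')  # (batch_size, seq_len, feature_dim)
with torch.no_grad():
    ref = eager(x)   # mask=None path, matching the compiled graph
    out = lowered(x)
print(torch.allclose(ref, out, atol=1e-5))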
AdjustSigmoid
from torch.nn import Module import torch from torch import Tensor from typing import Optional def KORNIA_CHECK_IS_TENSOR(x, msg: 'Optional[str]'=None): if not isinstance(x, Tensor): raise TypeError(f'Not a Tensor type. Got: {type(x)}.\n{msg}') def adjust_sigmoid(image: 'Tensor', cutoff: 'float'=0.5, gain: 'float'=10, inv: 'bool'=False) ->Tensor: """Adjust sigmoid correction on the input image tensor. The input image is expected to be in the range of [0, 1]. Reference: [1]: Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast Enhancement Functions", http://markfairchild.org/PDFs/PAP07.pdf Args: image: Image to be adjusted in the shape of :math:`(*, H, W)`. cutoff: The cutoff of sigmoid function. gain: The multiplier of sigmoid function. inv: If is set to True the function will return the inverse sigmoid correction. Returns: Adjusted tensor in the shape of :math:`(*, H, W)`. Example: >>> x = torch.ones(1, 1, 2, 2) >>> adjust_sigmoid(x, gain=0) tensor([[[[0.5000, 0.5000], [0.5000, 0.5000]]]]) """ KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)') if inv: img_adjust = 1 - 1 / (1 + (gain * (cutoff - image)).exp()) else: img_adjust = 1 / (1 + (gain * (cutoff - image)).exp()) return img_adjust class AdjustSigmoid(Module): """Adjust the contrast of an image tensor or performs sigmoid correction on the input image tensor. The input image is expected to be in the range of [0, 1]. Reference: [1]: Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast Enhancement Functions", http://markfairchild.org/PDFs/PAP07.pdf Args: image: Image to be adjusted in the shape of :math:`(*, H, W)`. cutoff: The cutoff of sigmoid function. gain: The multiplier of sigmoid function. inv: If is set to True the function will return the negative sigmoid correction. Example: >>> x = torch.ones(1, 1, 2, 2) >>> AdjustSigmoid(gain=0)(x) tensor([[[[0.5000, 0.5000], [0.5000, 0.5000]]]]) """ def __init__(self, cutoff: 'float'=0.5, gain: 'float'=10, inv: 'bool'=False ) ->None: super().__init__() self.cutoff: 'float' = cutoff self.gain: 'float' = gain self.inv: 'bool' = inv def forward(self, image: 'Tensor') ->Tensor: return adjust_sigmoid(image, cutoff=self.cutoff, gain=self.gain, inv=self.inv) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch import Tensor from typing import Optional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_reciprocal_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp1 - tmp0 tmp3 = 10.0 tmp4 = tmp2 * tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 1, tl.int32) tmp9 = tmp8 / tmp7 tmp10 = tmp9 * tmp6 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_reciprocal_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, def KORNIA_CHECK_IS_TENSOR(x, msg: 'Optional[str]'=None): if not isinstance(x, Tensor): raise TypeError(f'Not a Tensor type. Got: {type(x)}.\n{msg}') def adjust_sigmoid(image: 'Tensor', cutoff: 'float'=0.5, gain: 'float'=10, inv: 'bool'=False) ->Tensor: """Adjust sigmoid correction on the input image tensor. The input image is expected to be in the range of [0, 1]. Reference: [1]: Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast Enhancement Functions", http://markfairchild.org/PDFs/PAP07.pdf Args: image: Image to be adjusted in the shape of :math:`(*, H, W)`. cutoff: The cutoff of sigmoid function. gain: The multiplier of sigmoid function. inv: If is set to True the function will return the inverse sigmoid correction. Returns: Adjusted tensor in the shape of :math:`(*, H, W)`. Example: >>> x = torch.ones(1, 1, 2, 2) >>> adjust_sigmoid(x, gain=0) tensor([[[[0.5000, 0.5000], [0.5000, 0.5000]]]]) """ KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)') if inv: img_adjust = 1 - 1 / (1 + (gain * (cutoff - image)).exp()) else: img_adjust = 1 / (1 + (gain * (cutoff - image)).exp()) return img_adjust class AdjustSigmoidNew(Module): """Adjust the contrast of an image tensor or performs sigmoid correction on the input image tensor. The input image is expected to be in the range of [0, 1]. Reference: [1]: Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast Enhancement Functions", http://markfairchild.org/PDFs/PAP07.pdf Args: image: Image to be adjusted in the shape of :math:`(*, H, W)`. cutoff: The cutoff of sigmoid function. gain: The multiplier of sigmoid function. inv: If is set to True the function will return the negative sigmoid correction. Example: >>> x = torch.ones(1, 1, 2, 2) >>> AdjustSigmoid(gain=0)(x) tensor([[[[0.5000, 0.5000], [0.5000, 0.5000]]]]) """ def __init__(self, cutoff: 'float'=0.5, gain: 'float'=10, inv: 'bool'=False ) ->None: super().__init__() self.cutoff: 'float' = cutoff self.gain: 'float' = gain self.inv: 'bool' = inv def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
YanivHollander/kornia
AdjustSigmoid
false
14,628
[ "ECL-2.0", "Apache-2.0" ]
418
ccd258d0956da89b1feca96448eff8e4969d405a
https://github.com/YanivHollander/kornia/tree/ccd258d0956da89b1feca96448eff8e4969d405a
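A minimal check for this pair, assuming a CUDA device: note that the fused kernel bakes in the constructor defaults (the literals 0.5 and 10.0 are cutoff and gain, and only the inv=False branch is compiled), so the comparison below is only meaningful under those settings.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = adjust_sigmoid(x)      # eager reference defined in this record
out = AdjustSigmoidNew()(x)  # Triton-fused path with hard-coded defaults
print(torch.allclose(ref, out, atol=1e-6))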
AdaIN
import math import torch import torch.nn as nn from numpy import prod def getLayerNormalizationFactor(x): """ Get He's constant for the given layer https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf """ size = x.weight.size() fan_in = prod(size[1:]) return math.sqrt(2.0 / fan_in) class ConstrainedLayer(nn.Module): """ A handy refactor that allows the user to: - initialize one layer's bias to zero - apply He's initialization at runtime """ def __init__(self, module, equalized=True, lrMul=1.0, initBiasToZero=True): """ equalized (bool): if true, the layer's weight should evolve within the range (-1, 1) initBiasToZero (bool): if true, bias will be initialized to zero """ super(ConstrainedLayer, self).__init__() self.module = module self.equalized = equalized if initBiasToZero: self.module.bias.data.fill_(0) if self.equalized: self.module.weight.data.normal_(0, 1) self.module.weight.data /= lrMul self.weight = getLayerNormalizationFactor(self.module) * lrMul def forward(self, x): x = self.module(x) if self.equalized: x *= self.weight return x class EqualizedLinear(ConstrainedLayer): def __init__(self, nChannelsPrevious, nChannels, bias=True, **kwargs): """ A nn.Linear module with specific constraints Args: nChannelsPrevious (int): number of channels in the previous layer nChannels (int): number of channels of the current layer bias (bool): with bias ? """ ConstrainedLayer.__init__(self, nn.Linear(nChannelsPrevious, nChannels, bias=bias), **kwargs) class AdaIN(nn.Module): def __init__(self, dimIn, dimOut, epsilon=1e-08): super(AdaIN, self).__init__() self.epsilon = epsilon self.styleModulator = EqualizedLinear(dimIn, 2 * dimOut, equalized= True, initBiasToZero=True) self.dimOut = dimOut def forward(self, x, y): batchSize, nChannel, _width, _height = x.size() tmpX = x.view(batchSize, nChannel, -1) mux = tmpX.mean(dim=2).view(batchSize, nChannel, 1, 1) varx = torch.clamp((tmpX * tmpX).mean(dim=2).view(batchSize, nChannel, 1, 1) - mux * mux, min=0) varx = torch.rsqrt(varx + self.epsilon) x = (x - mux) * varx styleY = self.styleModulator(y) yA = styleY[:, :self.dimOut].view(batchSize, self.dimOut, 1, 1) yB = styleY[:, self.dimOut:].view(batchSize, self.dimOut, 1, 1) return yA * x + yB def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'dimIn': 4, 'dimOut': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn from numpy import prod assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_clamp_mean_mul_rsqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 16.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 0.0 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = 1e-08 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp22 = tmp20 + tmp21 tmp23 = 0.7071067811865476 tmp24 = tmp22 * tmp23 tmp25 = tmp0 - tmp11 tmp26 = tmp25 * tmp19 tmp27 = tmp24 * tmp26 tmp30 = tmp28 + tmp29 tmp31 = tmp30 * tmp23 tmp32 = tmp27 + tmp31 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp19, xmask) tl.store(out_ptr0 + (r1 + 16 * x0), tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf4) del primals_2 buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_clamp_mean_mul_rsqrt_sub_0[grid(16)](buf1, buf3, primals_1, buf4, primals_3, buf5, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf4 del primals_3 return buf5, primals_1, primals_4, reinterpret_tensor(buf1, (4, 4, 1, 1 ), (4, 1, 1, 1), 0), buf3 def getLayerNormalizationFactor(x): """ Get He's constant for the given layer https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf """ size = x.weight.size() fan_in = prod(size[1:]) return 
math.sqrt(2.0 / fan_in) class ConstrainedLayer(nn.Module): """ A handy refactor that allows the user to: - initialize one layer's bias to zero - apply He's initialization at runtime """ def __init__(self, module, equalized=True, lrMul=1.0, initBiasToZero=True): """ equalized (bool): if true, the layer's weight should evolve within the range (-1, 1) initBiasToZero (bool): if true, bias will be initialized to zero """ super(ConstrainedLayer, self).__init__() self.module = module self.equalized = equalized if initBiasToZero: self.module.bias.data.fill_(0) if self.equalized: self.module.weight.data.normal_(0, 1) self.module.weight.data /= lrMul self.weight = getLayerNormalizationFactor(self.module) * lrMul def forward(self, x): x = self.module(x) if self.equalized: x *= self.weight return x class EqualizedLinear(ConstrainedLayer): def __init__(self, nChannelsPrevious, nChannels, bias=True, **kwargs): """ A nn.Linear module with specific constraints Args: nChannelsPrevious (int): number of channels in the previous layer nChannels (int): number of channels of the current layer bias (bool): with bias ? """ ConstrainedLayer.__init__(self, nn.Linear(nChannelsPrevious, nChannels, bias=bias), **kwargs) class AdaINNew(nn.Module): def __init__(self, dimIn, dimOut, epsilon=1e-08): super(AdaINNew, self).__init__() self.epsilon = epsilon self.styleModulator = EqualizedLinear(dimIn, 2 * dimOut, equalized= True, initBiasToZero=True) self.dimOut = dimOut def forward(self, input_0, input_1): primals_2 = self.styleModulator.module.weight primals_3 = self.styleModulator.module.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Andribi/pytorch_GAN_zoo
AdaIN
false
4,862
[ "BSD-3-Clause" ]
1
b37c7268cbd4ec7dc61ba65a3ccf11af71247597
https://github.com/Andribi/pytorch_GAN_zoo/tree/b37c7268cbd4ec7dc61ba65a3ccf11af71247597
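A minimal sketch, assuming a CUDA device and the classes from this record: EqualizedLinear draws random weights in __init__, so the eager and fused modules must share a state dict before their outputs can be compared. The literal 0.7071067811865476 in the kernel is the equalization factor sqrt(2 / fan_in) for fan_in=4, applied to (Wx + b) exactly as in ConstrainedLayer.forward.

import torch

torch.manual_seed(0)
eager = AdaIN(dimIn=4, dimOut=4).cuda()
fused = AdaINNew(dimIn=4, dimOut=4).cuda()
fused.load_state_dict(eager.state_dict())  # same parameter names

x = torch.rand(4, 4, 4, 4, device='cuda')  # feature maps
y = torch.rand(4, 4, device='cuda')        # style vectors
print(torch.allclose(eager(x, y), fused(x, y), atol=1e-5))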
PolicyNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 
4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rm/crmfikkxblrhxfynyknfm2x3wwcwtibkjkkbyhzwmxqi4kmwkosl.py # Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] # Source node to ATen node mapping: # log_std_1 => clamp_max, clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_9, -20), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_9, -20), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_9, 2), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {}) triton_poi_fused_clamp_ge_le_logical_and_1 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -20.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 2.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 >= tmp3 tmp8 = tmp2 <= tmp5 tmp9 = tmp7 & tmp8 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf12, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf11, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf6, primals_9, buf10, 256, grid=grid(256), stream=stream0) del primals_9 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] triton_poi_fused_clamp_ge_le_logical_and_1.run(buf7, primals_11, buf8, buf9, 256, grid=grid(256), stream=stream0) del buf7 del primals_11 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf8, 
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf6, (64, 4), (4, 1), 0), buf9, primals_10, buf10, primals_8, primals_6, buf11, primals_4, buf12, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.distributions import Normal assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -20.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 2.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 >= tmp3 tmp8 = tmp2 <= tmp5 tmp9 = tmp7 & tmp8 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 
4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf6, primals_9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_clamp_ge_le_logical_and_1[grid(256)](buf7, primals_11, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 del primals_11 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf6, (64, 4), (4, 1), 0 ), buf9, primals_10, buf10, primals_8, primals_6, buf11, primals_4, buf12 class PolicyNetworkNew(nn.Module): def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003, log_std_min=-20, log_std_max=2): super(PolicyNetworkNew, self).__init__() self.log_std_min = log_std_min self.log_std_max = log_std_max self.linear1 = nn.Linear(num_inputs, hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.mean_linear = nn.Linear(hidden_size, num_actions) self.mean_linear.weight.data.uniform_(-init_w, init_w) self.mean_linear.bias.data.uniform_(-init_w, init_w) self.log_std_linear1 = nn.Linear(hidden_size, hidden_size) self.log_std_linear2 = nn.Linear(hidden_size, num_actions) self.log_std_linear2.weight.data.uniform_(-init_w, init_w) self.log_std_linear2.bias.data.uniform_(-init_w, init_w) def evaluate(self, state, epsilon=1e-06): mean, log_std = self.forward(state) std = log_std.exp() normal = Normal(mean, std) z = normal.rsample() action = torch.tanh(z) log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon) log_prob = log_prob.sum(-1, keepdim=True) return action, log_prob, z, mean, log_std def get_action(self, state): state = torch.FloatTensor(state).unsqueeze(0) mean, log_std = self.forward(state) std = log_std.exp() normal = Normal(mean, std) z = normal.sample() action = torch.tanh(z) action = action.detach().cpu().numpy() return action[0] def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.mean_linear.weight primals_7 = self.mean_linear.bias primals_8 = self.log_std_linear1.weight primals_9 = self.log_std_linear1.bias primals_10 = self.log_std_linear2.weight primals_11 = self.log_std_linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
watermeleon/spot_mini_mini
PolicyNetwork
false
13,101
[ "MIT" ]
0
8622d3b0e0a95f7c548cacb6722a94f61a7e2b4b
https://github.com/watermeleon/spot_mini_mini/tree/8622d3b0e0a95f7c548cacb6722a94f61a7e2b4b
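A minimal usage sketch, assuming a CUDA device: only the compiled forward pass is exercised here; evaluate() and get_action() still run the eager torch.distributions code on top of it. The second Triton kernel clamps log_std to [log_std_min, log_std_max] = [-20, 2].

import torch

torch.manual_seed(0)
policy = PolicyNetworkNew(num_inputs=4, num_actions=4, hidden_size=4).cuda()
state = torch.rand(4, 4, 4, 4, device='cuda')  # shape from the AOT dump
mean, log_std = policy(state)
print(mean.shape, log_std.shape)  # both torch.Size([4, 4, 4, 4])
print(float(log_std.min()) >= -20.0, float(log_std.max()) <= 2.0)  # clamp held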
Model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/mb/cmb72vxh36b4k6lvmt4562lj3nrqtpyzst2qbon2yqx22gdjfa7x.py # Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # y_pred => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mm,), kwargs = {}) triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() 
assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_0.run(buf1, 16, grid=grid(16), stream=stream0) return (buf1, buf1, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(16)](buf1, 16, XBLOCK=16, num_warps =1, num_stages=1) return buf1, buf1, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class ModelNew(torch.nn.Module): def __init__(self, D_in, D_out): super(ModelNew, self).__init__() self.w1 = torch.nn.Parameter(torch.randn(D_in, D_out), requires_grad=True) self.sig = torch.nn.Sigmoid() def forward(self, input_0): primals_1 = self.w1 primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
Saran-nns/gradients
Model
false
1,018
[ "MIT" ]
0
67f9ff92589047828563dbbe30f225dca7ad47fd
https://github.com/Saran-nns/gradients/tree/67f9ff92589047828563dbbe30f225dca7ad47fd
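A minimal check, assuming a CUDA device: the wrapper passes the learned weight as the left operand of the mm, so the eager reference is sigmoid(w1 @ x), not sigmoid(x @ w1).

import torch

torch.manual_seed(0)
model = ModelNew(D_in=4, D_out=4).cuda()
x = torch.rand(4, 4, device='cuda')
out = model(x)
ref = torch.sigmoid(model.w1 @ x)  # mirrors extern_kernels.mm(w1, x)
print(torch.allclose(out, ref, atol=1e-6))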
MSE_disc
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/4s/c4s3xowwp3oadled6o7j4eugj44hmnk7o4jpxawswyucmffhmpdw.py # Topologically Sorted Source Nodes: [sub, loss, mean], Original ATen: [aten.sub, aten.pow, aten.mean] # Source node to ATen node mapping: # loss => pow_1 # mean => mean # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0]), kwargs = {}) triton_poi_fused_mean_pow_sub_0 = async_compile.triton('triton_poi_fused_mean_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) 
* XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr0 + (64 + x0), xmask) tmp5 = tl.load(in_ptr1 + (64 + x0), xmask) tmp9 = tl.load(in_ptr0 + (128 + x0), xmask) tmp10 = tl.load(in_ptr1 + (128 + x0), xmask) tmp14 = tl.load(in_ptr0 + (192 + x0), xmask) tmp15 = tl.load(in_ptr1 + (192 + x0), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tl.store(out_ptr0 + (x0), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, loss, mean], Original ATen: [aten.sub, aten.pow, aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_pow_sub_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr0 + (64 + x0), xmask) tmp5 = tl.load(in_ptr1 + (64 + x0), xmask) tmp9 = tl.load(in_ptr0 + (128 + x0), xmask) tmp10 = tl.load(in_ptr1 + (128 + x0), xmask) tmp14 = tl.load(in_ptr0 + (192 + x0), xmask) tmp15 = tl.load(in_ptr1 + (192 + x0), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tl.store(out_ptr0 + x0, tmp20, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_pow_sub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class MSE_discNew(nn.Module): def __init__(self, weight_list=None): super(MSE_discNew, self).__init__() self.weight_list = weight_list def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Sampson-Lee/SIB-Net
MSE_disc
false
2,809
[ "MIT" ]
0
650399082e9237327fa38168ccfc7d48153a1db5
https://github.com/Sampson-Lee/SIB-Net/tree/650399082e9237327fa38168ccfc7d48153a1db5
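A minimal check, assuming a CUDA device: per the graph fragment in the dump (sub, pow, mean over dim [0]), the fused kernel unrolls the reduction over the batch dimension (the hard-coded 4.0 divisor), so the output is a per-position (4, 4, 4) loss map rather than a scalar; weight_list is ignored by the compiled path.

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
out = MSE_discNew()(a, b)
ref = ((a - b) ** 2).mean(dim=0)  # eager equivalent of the fused kernel
print(out.shape, torch.allclose(out, ref, atol=1e-6))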
SphericalBesselBasis
import math import torch import numpy as np class SphericalBesselBasis(torch.nn.Module): """ 1D spherical Bessel basis Parameters ---------- num_radial: int Controls maximum frequency. cutoff: float Cutoff distance in Angstrom. """ def __init__(self, num_radial: 'int', cutoff: 'float'): super().__init__() self.norm_const = math.sqrt(2 / cutoff ** 3) self.frequencies = torch.nn.Parameter(data=torch.tensor(np.pi * np. arange(1, num_radial + 1, dtype=np.float32)), requires_grad=True) def forward(self, d_scaled): return self.norm_const / d_scaled[:, None] * torch.sin(self. frequencies * d_scaled[:, None]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_radial': 4, 'cutoff': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reciprocal_sin_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 0.1767766952966369 tmp4 = tmp2 * tmp3 tmp6 = tmp5 * tmp0 tmp7 = tl_math.sin(tmp6) tmp8 = tmp4 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reciprocal_sin_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf0, primals_1, primals_2 class SphericalBesselBasisNew(torch.nn.Module): """ 1D spherical Bessel basis Parameters ---------- num_radial: int Controls maximum frequency. cutoff: float Cutoff distance in Angstrom. """ def __init__(self, num_radial: 'int', cutoff: 'float'): super().__init__() self.norm_const = math.sqrt(2 / cutoff ** 3) self.frequencies = torch.nn.Parameter(data=torch.tensor(np.pi * np. arange(1, num_radial + 1, dtype=np.float32)), requires_grad=True) def forward(self, input_0): primals_2 = self.frequencies primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
RolnickLab/ocp
SphericalBesselBasis
false
2,781
[ "MIT" ]
0
e120c3b90203a46f5fc7626f0b5c8979e4944765
https://github.com/RolnickLab/ocp/tree/e120c3b90203a46f5fc7626f0b5c8979e4944765
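A minimal check, assuming a CUDA device: frequencies are initialized deterministically (pi * arange(1, num_radial + 1)), so no state-dict copy is needed, and the literal 0.1767766952966369 in the kernel is the precomputed norm_const = sqrt(2 / cutoff**3) for cutoff=4.

import torch

eager = SphericalBesselBasis(num_radial=4, cutoff=4).cuda()
fused = SphericalBesselBasisNew(num_radial=4, cutoff=4).cuda()
d = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(eager(d), fused(d), atol=1e-6))  # output shape (4, 1, 4, 4, 4)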
ISAB
import math import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def qkv_attention(queries, keys, values, presence=None): """ Transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ d_k = queries.shape[-1] routing = torch.matmul(queries, keys.transpose(1, 2)) if presence is not None: routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32 routing = F.softmax(routing / np.sqrt(d_k), -1) return torch.matmul(routing, values) class MultiHeadQKVAttention(nn.Module): """Multi-head version of Transformer-like attention.""" def __init__(self, d_k, d_v, n_heads): super().__init__() self.d_k = d_k self.d_v = d_v self.n_heads = n_heads d_k_p = int(math.ceil(d_k / n_heads)) * n_heads d_v_p = int(math.ceil(d_v / n_heads)) * n_heads self.q_projector = nn.Linear(d_k, d_k_p) self.k_projector = nn.Linear(d_k, d_k_p) self.v_projector = nn.Linear(d_v, d_v_p) self.o_projector = nn.Linear(d_v_p, d_v) def forward(self, queries, keys, values, presence=None): """ Multi-head transformer-like self-attention. Args: queries: Tensor of shape [B, N, d_k]. keys: Tensor of shape [B, M, d_k]. values: : Tensor of shape [B, M, d_v]. presence: None or tensor of shape [B, M]. Returns: Tensor of shape [B, N, d_v] """ assert queries.shape[2] == keys.shape[2] assert keys.shape[1] == values.shape[1] if presence is not None: assert values.shape[:2] == presence.shape B, N, _d_k = queries.shape M, _d_v = values.shape[1:] H = self.n_heads q_p = self.q_projector(queries) k_p = self.k_projector(keys) v_p = self.v_projector(values) del queries, keys, values q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, N, -1) k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H * B, M, -1) if presence is not None: presence = presence.repeat(self.n_heads, 1) o = qkv_attention(q, k, v, presence) o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1) return self.o_projector(o) class MAB(nn.Module): def __init__(self, d, n_heads, layer_norm=False): super().__init__() self.layer_norm = layer_norm self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads) if layer_norm: self.ln0 = nn.LayerNorm(d) self.ln1 = nn.LayerNorm(d) self.fc = nn.Linear(d, d) def forward(self, queries, keys, presence=None): h = self.mqkv(queries, keys, keys, presence) h = h + queries if presence is not None: assert presence.shape[1] == queries.shape[1] == keys.shape[1] h = h * presence.unsqueeze(-1) if self.layer_norm: h = self.ln0(h) h = h + F.relu(self.fc(h)) if self.layer_norm: h = self.ln1(h) return h class ISAB(nn.Module): def __init__(self, d, n_heads, n_inducing_points, layer_norm=False): super().__init__() self.mab0 = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm) self.mab1 = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm) self.I = nn.Parameter(torch.zeros(1, n_inducing_points, d), requires_grad=True) with torch.no_grad(): nn.init.xavier_uniform_(self.I) def forward(self, x, presence=None): batch_size = x.shape[0] h = self.mab0(self.I.repeat(batch_size, 1, 1), x, presence) return self.mab1(x, h) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d': 4, 'n_heads': 4, 'n_inducing_points': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x1 + 16 * y0), tmp2, xmask & ymask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
        ='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_relu_threshold_backward_6(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = tmp0 + tmp5
    tmp7 = 0.0
    tmp8 = tmp5 <= tmp7
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4, 4), (4, 1))
    assert_size_stride(primals_14, (4,), (1,))
    assert_size_stride(primals_15, (4, 4), (4, 1))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (4, 4), (4, 1))
    assert_size_stride(primals_18, (4,), (1,))
    assert_size_stride(primals_19, (4, 4), (4, 1))
    assert_size_stride(primals_20, (4,), (1,))
    assert_size_stride(primals_21, (4, 4), (4, 1))
    assert_size_stride(primals_22, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_repeat_0[grid(64)](primals_2, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_1[grid(4, 16)](buf1, primals_4, buf4, 4, 16,
            XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1)
        del primals_4
        buf5 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf1
        triton_poi_fused_clone_1[grid(4, 16)](buf2, primals_6, buf5, 4, 16,
            XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1)
        del primals_6
        buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
        buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf8 = buf6
        del buf6
        triton_poi_fused__softmax_3[grid(256)](buf7, buf8, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf2
        triton_poi_fused_clone_1[grid(4, 16)](buf3, primals_8, buf9, 4, 16,
            XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1)
        del primals_8
        buf10 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0)
        del buf3
        extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (16, 4, 1), (4,
            1, 0), 0), out=buf10)
        buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_4[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
        del buf10
        extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12)
        buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
        del buf12
        triton_poi_fused_add_5[grid(64)](buf13, primals_10, buf0, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_10
        buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf14)
        buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf15)
        del primals_13
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_6[grid(64)](buf13,
            buf14, primals_12, buf16, buf32, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_12
        buf17 = buf14
        del buf14
        extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf17)
        buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf18)
        buf19 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_1[grid(4, 16)](buf15, primals_14, buf19, 4,
            16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1)
        del primals_14
        buf20 = reinterpret_tensor(buf15, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf15
        triton_poi_fused_clone_1[grid(4, 16)](buf17, primals_16, buf20, 4,
            16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1)
        del primals_16
        buf21 = buf7
        del buf7
        extern_kernels.bmm(reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf20, (16, 1, 4), (4, 0, 1), 0), out=buf21)
        buf22 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(256)](buf21, buf22, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        buf23 = buf21
        del buf21
        triton_poi_fused__softmax_3[grid(256)](buf22, buf23, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del buf22
        buf24 = reinterpret_tensor(buf17, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf17
        triton_poi_fused_clone_1[grid(4, 16)](buf18, primals_18, buf24, 4,
            16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1)
        del primals_18
        buf25 = reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 1), 0)
        del buf18
        extern_kernels.bmm(buf23, reinterpret_tensor(buf24, (16, 4, 1), (4,
            1, 0), 0), out=buf25)
        buf26 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_4[grid(16, 4)](buf25, buf26, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf27 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0)
        del buf25
        extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf27)
        buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0)
        del buf27
        triton_poi_fused_add_5[grid(64)](buf28, primals_20, primals_1, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_20
        buf29 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf28, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf29)
        buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_6[grid(64)](buf28,
            buf29, primals_22, buf30, buf31, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf29
        del primals_22
    return (buf30, reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf8,
        reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(
        buf13, (16, 4), (4, 1), 0), reinterpret_tensor(buf16, (16, 4), (4,
        1), 0), buf23, reinterpret_tensor(buf26, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf28, (16, 4), (4, 1), 0), buf31, primals_21,
        primals_19, reinterpret_tensor(buf24, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf19, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 1), 0), primals_17,
        primals_15, buf32, primals_11, primals_9, reinterpret_tensor(buf9,
        (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4,
        1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0),
        primals_3)


def qkv_attention(queries, keys, values, presence=None):
    """
    Transformer-like self-attention.

    Args:
      queries: Tensor of shape [B, N, d_k].
      keys: Tensor of shape [B, M, d_k].
      values: Tensor of shape [B, M, d_v].
      presence: None or tensor of shape [B, M].

    Returns:
      Tensor of shape [B, N, d_v]
    """
    d_k = queries.shape[-1]
    routing = torch.matmul(queries, keys.transpose(1, 2))
    if presence is not None:
        routing -= (1.0 - presence.unsqueeze(-2)) * 1e+32
    routing = F.softmax(routing / np.sqrt(d_k), -1)
    return torch.matmul(routing, values)


class MultiHeadQKVAttention(nn.Module):
    """Multi-head version of Transformer-like attention."""

    def __init__(self, d_k, d_v, n_heads):
        super().__init__()
        self.d_k = d_k
        self.d_v = d_v
        self.n_heads = n_heads
        d_k_p = int(math.ceil(d_k / n_heads)) * n_heads
        d_v_p = int(math.ceil(d_v / n_heads)) * n_heads
        self.q_projector = nn.Linear(d_k, d_k_p)
        self.k_projector = nn.Linear(d_k, d_k_p)
        self.v_projector = nn.Linear(d_v, d_v_p)
        self.o_projector = nn.Linear(d_v_p, d_v)

    def forward(self, queries, keys, values, presence=None):
        """
        Multi-head transformer-like self-attention.

        Args:
          queries: Tensor of shape [B, N, d_k].
          keys: Tensor of shape [B, M, d_k].
          values: Tensor of shape [B, M, d_v].
          presence: None or tensor of shape [B, M].

        Returns:
          Tensor of shape [B, N, d_v]
        """
        assert queries.shape[2] == keys.shape[2]
        assert keys.shape[1] == values.shape[1]
        if presence is not None:
            assert values.shape[:2] == presence.shape
        B, N, _d_k = queries.shape
        M, _d_v = values.shape[1:]
        H = self.n_heads
        q_p = self.q_projector(queries)
        k_p = self.k_projector(keys)
        v_p = self.v_projector(values)
        del queries, keys, values
        q = q_p.view(B, N, H, -1).permute(2, 0, 1, 3).contiguous().view(H *
            B, N, -1)
        k = k_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H *
            B, M, -1)
        v = v_p.view(B, M, H, -1).permute(2, 0, 1, 3).contiguous().view(H *
            B, M, -1)
        if presence is not None:
            presence = presence.repeat(self.n_heads, 1)
        o = qkv_attention(q, k, v, presence)
        o = o.view(H, B, N, -1).permute(1, 2, 0, 3).contiguous().view(B, N, -1)
        return self.o_projector(o)


class MAB(nn.Module):

    def __init__(self, d, n_heads, layer_norm=False):
        super().__init__()
        self.layer_norm = layer_norm
        self.mqkv = MultiHeadQKVAttention(d_k=d, d_v=d, n_heads=n_heads)
        if layer_norm:
            self.ln0 = nn.LayerNorm(d)
            self.ln1 = nn.LayerNorm(d)
        self.fc = nn.Linear(d, d)

    def forward(self, queries, keys, presence=None):
        h = self.mqkv(queries, keys, keys, presence)
        h = h + queries
        if presence is not None:
            assert presence.shape[1] == queries.shape[1] == keys.shape[1]
            h = h * presence.unsqueeze(-1)
        if self.layer_norm:
            h = self.ln0(h)
        h = h + F.relu(self.fc(h))
        if self.layer_norm:
            h = self.ln1(h)
        return h


class ISABNew(nn.Module):

    def __init__(self, d, n_heads, n_inducing_points, layer_norm=False):
        super().__init__()
        self.mab0 = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm)
        self.mab1 = MAB(d=d, n_heads=n_heads, layer_norm=layer_norm)
        self.I = nn.Parameter(torch.zeros(1, n_inducing_points, d),
            requires_grad=True)
        with torch.no_grad():
            nn.init.xavier_uniform_(self.I)

    def forward(self, input_0):
        primals_2 = self.I
        primals_3 = self.mab0.mqkv.q_projector.weight
        primals_4 = self.mab0.mqkv.q_projector.bias
        primals_5 = self.mab0.mqkv.k_projector.weight
        primals_6 = self.mab0.mqkv.k_projector.bias
        primals_7 = self.mab0.mqkv.v_projector.weight
        primals_8 = self.mab0.mqkv.v_projector.bias
        primals_9 = self.mab0.mqkv.o_projector.weight
        primals_10 = self.mab0.mqkv.o_projector.bias
        primals_11 = self.mab0.fc.weight
        primals_12 = self.mab0.fc.bias
        primals_13 = self.mab1.mqkv.q_projector.weight
        primals_14 = self.mab1.mqkv.q_projector.bias
        primals_15 = self.mab1.mqkv.k_projector.weight
        primals_16 = self.mab1.mqkv.k_projector.bias
        primals_17 = self.mab1.mqkv.v_projector.weight
        primals_18 = self.mab1.mqkv.v_projector.bias
        primals_19 = self.mab1.mqkv.o_projector.weight
        primals_20 = self.mab1.mqkv.o_projector.bias
        primals_21 = self.mab1.fc.weight
        primals_22 = self.mab1.fc.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22])
        return output[0]
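A hedged equivalence sketch between the eager ISAB and the compiled ISABNew above; the names `eager` and `fused` are mine, and a CUDA device is assumed since call() pins every buffer to cuda:0:

import torch

eager = ISAB(d=4, n_heads=4, n_inducing_points=4).cuda()
fused = ISABNew(d=4, n_heads=4, n_inducing_points=4).cuda()
fused.load_state_dict(eager.state_dict())  # both expose identical parameter names

x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    # The Triton path should reproduce the eager forward up to float32 rounding.
    assert torch.allclose(eager(x), fused(x), atol=1e-5)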
karayanni/torch-scae
ISAB
false
10,442
[ "Apache-2.0" ]
0
e044662d8942d8d1923d13d071f375144cf4a1e8
https://github.com/karayanni/torch-scae/tree/e044662d8942d8d1923d13d071f375144cf4a1e8
KlCriterion
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => amax_1, exp_1, sub_2
# Graph fragment:
#   %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {})
#   %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/g5/cg5f2rptqnpi2mrqpqc4tujqpbrrrjrse6plhgftx425znsffpfv.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   log_softmax => amax, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/uf/cufuuxvelrltzwxjbbkikdu25p6qhp4n3izwy7jc75fb6567imxf.py
# Topologically Sorted Source Nodes: [softmax, loss, log_softmax, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div]
# Source node to ATen node mapping:
#   log_softmax => exp, log, sub_1, sum_1
#   loss => div_1, eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1
#   loss_1 => mul_2
#   softmax => div, sum_2
# Graph fragment:
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
#   %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
#   %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div,), kwargs = {})
#   %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div, 0), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %log_1), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
#   %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_1), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {})
#   %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, 1.0), kwargs = {})
triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2 = async_compile.triton('triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        r1 = (rindex // 4)
        tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp17 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0)
        tmp18 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp20 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp23 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp26 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
        tmp3 = tmp1 + tmp2
        tmp5 = tmp3 + tmp4
        tmp7 = tmp5 + tmp6
        tmp8 = tmp0 / tmp7
        tmp9 = libdevice.isnan(tmp8).to(tl.int1)
        tmp10 = 0.0
        tmp11 = tmp8 == tmp10
        tmp12 = tl_math.log(tmp8)
        tmp13 = tmp8 * tmp12
        tmp14 = tl.where(tmp11, tmp10, tmp13)
        tmp15 = float("nan")
        tmp16 = tl.where(tmp9, tmp15, tmp14)
        tmp19 = tl_math.exp(tmp18)
        tmp21 = tl_math.exp(tmp20)
        tmp22 = tmp19 + tmp21
        tmp24 = tl_math.exp(tmp23)
        tmp25 = tmp22 + tmp24
        tmp27 = tl_math.exp(tmp26)
        tmp28 = tmp25 + tmp27
        tmp29 = tl_math.log(tmp28)
        tmp30 = tmp17 - tmp29
        tmp31 = tmp8 * tmp30
        tmp32 = tmp16 - tmp31
        tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
        tmp35 = _tmp34 + tmp33
        _tmp34 = tl.where(rmask, tmp35, _tmp34)
    tmp34 = tl.sum(_tmp34, 1)[:, None]
    tmp36 = 0.25
    tmp37 = tmp34 * tmp36
    tmp38 = 1.0
    tmp39 = tmp37 * tmp38
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp39, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        stream0 = get_raw_stream(0)
        triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
        triton_poi_fused__log_softmax_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0)
        del arg0_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3; del buf3  # reuse
        # Topologically Sorted Source Nodes: [softmax, loss, log_softmax, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div]
        triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0)
        del buf0
        del buf2
    return (buf4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
    constexpr):
    rnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        r1 = rindex // 4
        tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
            other=0.0)
        tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp3 = tmp1 + tmp2
        tmp5 = tmp3 + tmp4
        tmp7 = tmp5 + tmp6
        tmp8 = tmp0 / tmp7
        tmp9 = libdevice.isnan(tmp8).to(tl.int1)
        tmp10 = 0.0
        tmp11 = tmp8 == tmp10
        tmp12 = tl_math.log(tmp8)
        tmp13 = tmp8 * tmp12
        tmp14 = tl.where(tmp11, tmp10, tmp13)
        tmp15 = float('nan')
        tmp16 = tl.where(tmp9, tmp15, tmp14)
        tmp19 = tl_math.exp(tmp18)
        tmp21 = tl_math.exp(tmp20)
        tmp22 = tmp19 + tmp21
        tmp24 = tl_math.exp(tmp23)
        tmp25 = tmp22 + tmp24
        tmp27 = tl_math.exp(tmp26)
        tmp28 = tmp25 + tmp27
        tmp29 = tl_math.log(tmp28)
        tmp30 = tmp17 - tmp29
        tmp31 = tmp8 * tmp30
        tmp32 = tmp16 - tmp31
        tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
        tmp35 = _tmp34 + tmp33
        _tmp34 = tl.where(rmask, tmp35, _tmp34)
    tmp34 = tl.sum(_tmp34, 1)[:, None]
    tmp36 = 0.25
    tmp37 = tmp34 * tmp36
    tmp38 = 1.0
    tmp39 = tmp37 * tmp38
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf2, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_red_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1)
            ](buf4, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8,
            num_stages=1)
        del buf0
        del buf2
    return buf4,


class Criterion(_Loss):

    def __init__(self, alpha=1.0, name='criterion'):
        super().__init__()
        """Alpha is used to weight each loss term
        """
        self.alpha = alpha
        self.name = name

    def forward(self, input, target, weight=None, ignore_index=-1):
        """weight: sample weight
        """
        return


class KlCriterionNew(Criterion):

    def __init__(self, alpha=1.0, name='KL Div Criterion'):
        super().__init__()
        self.alpha = alpha
        self.name = name

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
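The fused reduction above computes sum(softmax(target) * (log softmax(target) - log_softmax(input))), scaled by 0.25 (the batch size of 4) and by alpha = 1.0. As a hedged sketch (CUDA assumed, variable names mine), it should therefore agree with the eager batchmean KL formulation:

import torch
import torch.nn.functional as F

inp = torch.rand(4, 4, 4, 4, device='cuda')
tgt = torch.rand(4, 4, 4, 4, device='cuda')
# Reference: batchmean KL divergence between softmax(tgt) and softmax(inp).
expected = F.kl_div(F.log_softmax(inp, dim=-1), F.softmax(tgt, dim=-1),
    reduction='batchmean')
got = KlCriterionNew()(inp, tgt)
assert torch.allclose(got, expected, atol=1e-5)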
kiminh/mt-dnn
KlCriterion
false
7,029
[ "MIT" ]
1
133884b380244dbe74acc4d7507e551b2c5035b3
https://github.com/kiminh/mt-dnn/tree/133884b380244dbe74acc4d7507e551b2c5035b3
VarianceNorm2d
import torch
import torch.nn as nn


class VarianceNorm2d(nn.Module):

    def __init__(self, num_features, bias=False):
        super().__init__()
        self.num_features = num_features
        self.bias = bias
        self.alpha = nn.Parameter(torch.zeros(num_features))
        self.alpha.data.normal_(1, 0.02)

    def forward(self, x):
        vars = torch.var(x, dim=(2, 3), keepdim=True)
        h = x / torch.sqrt(vars + 1e-05)
        out = self.alpha.view(-1, self.num_features, 1, 1) * h
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
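A quick numeric sanity sketch (mine, not part of the dataset row) for the normalization above: dividing each (batch, channel) slice by its spatial standard deviation leaves the slice with variance close to 1, before the learned alpha rescaling:

import torch

x = torch.rand(4, 4, 4, 4)
vars = torch.var(x, dim=(2, 3), keepdim=True)  # unbiased, matching forward()
h = x / torch.sqrt(vars + 1e-05)
assert torch.allclose(torch.var(h, dim=(2, 3)), torch.ones(4, 4), atol=1e-3)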
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_div_mul_sqrt_var_0(in_out_ptr0, in_ptr0, in_ptr1,
    out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    x2 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp22 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 15.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-05
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.sqrt(tmp20)
    tmp23 = tmp0 / tmp21
    tmp24 = tmp22 * tmp23
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp21, xmask)
    tl.store(out_ptr0 + (r1 + 16 * x0), tmp24, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf1
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_div_mul_sqrt_var_0[grid(16)](buf3, primals_1,
            primals_2, buf4, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del primals_2
    return buf4, primals_1, buf3


class VarianceNorm2dNew(nn.Module):

    def __init__(self, num_features, bias=False):
        super().__init__()
        self.num_features = num_features
        self.bias = bias
        self.alpha = nn.Parameter(torch.zeros(num_features))
        self.alpha.data.normal_(1, 0.02)

    def forward(self, input_0):
        primals_2 = self.alpha
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
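A hedged check (CUDA assumed, names mine) that the fused kernel, which folds the mean, the unbiased variance (note the division by 15.0 for 16 spatial samples), sqrt, division, and alpha scaling into one pass, matches the eager module:

import torch

eager = VarianceNorm2d(num_features=4).cuda()
fused = VarianceNorm2dNew(num_features=4).cuda()
fused.load_state_dict(eager.state_dict())  # alpha is randomly initialised

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-5)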
DeepTitan/PNDM
VarianceNorm2d
false
13,568
[ "Apache-2.0" ]
61
4037a4f40011c9a0d47b92303e64d47fcc7ed56a
https://github.com/DeepTitan/PNDM/tree/4037a4f40011c9a0d47b92303e64d47fcc7ed56a
Downsample
import torch
import torch.nn as nn


def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')


def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')


class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2):
        super().__init__()
        self.channels = channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(dims, channels, channels, 3, stride=stride,
                padding=1)
        else:
            self.op = avg_pool_nd(stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'use_conv': 4}]
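A small sketch (shapes are mine, not from the dataset row) of the conv_nd factory above picking the dimensionality, and of the stride-2 downsampling it configures:

import torch

conv = conv_nd(2, 4, 4, 3, stride=2, padding=1)  # returns nn.Conv2d(4, 4, 3, ...)
y = conv(torch.rand(1, 4, 8, 8))
assert y.shape == (1, 4, 4, 4)                   # spatial dims halved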
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
            2), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(64)](buf1, primals_3, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    return buf1, primals_1, primals_2


def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')


def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')


class DownsampleNew(nn.Module):
    """
    A downsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2):
        super().__init__()
        self.channels = channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(dims, channels, channels, 3, stride=stride,
                padding=1)
        else:
            self.op = avg_pool_nd(stride)

    def forward(self, input_0):
        primals_2 = self.op.weight
        primals_3 = self.op.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
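Usage sketch for the compiled module (CUDA assumed, names mine): with use_conv truthy the op is a 3x3 convolution with stride 2 and padding 1, so the fixed shape assertions in call() imply that 4x4 inputs come out 2x2:

import torch

down = DownsampleNew(channels=4, use_conv=True).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    y = down(x)
assert y.shape == (4, 4, 2, 2)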
francoismaze/improved-diffusion
Downsample
false
10,076
[ "MIT" ]
0
bb403ba2437d6d834bb285b7259549fb3fa40f1b
https://github.com/francoismaze/improved-diffusion/tree/bb403ba2437d6d834bb285b7259549fb3fa40f1b
ConvBlock
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/tt/ctthexrcgmoykvsyasq7xirwxi6m3yxgjocmuvarikaawgqvdiws.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
#   out => _unsafe_index, _unsafe_index_1
# Graph fragment:
#   %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
#   %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = (xindex // 6) % 6
    x2 = (xindex // 36)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/ty/ctyzpua7fjfn7hztwdpigb3gi7yf6hmfsexovkmknohzjtrj2z3h.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
#   out_1 => convolution
#   out_2 => expm1, gt, mul, mul_2, where
# Graph fragment:
#   %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
#   %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
#   %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_convolution_elu_1 = async_compile.triton('triton_poi_fused_convolution_elu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
        stream0 = get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
        del primals_1
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu]
        triton_poi_fused_convolution_elu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
        del primals_3
    return (buf2, primals_2, buf0, buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
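A sanity sketch (mine) for the fused reflection-pad indexing in the kernel above: for a 4x4 input padded by 1, the flattened address 15 - |...| - 4*|...| + 16*x2 decomposes as reflect(x0) + 4*reflect(x1) + 16*x2, where reflect(i) = 3 - |3 - |i - 1|| is the reflected input coordinate:

def reflect(i, n=4, pad=1):
    # Reflected input coordinate for output coordinate i, as in ReflectionPad2d(1).
    return (n - 1) - abs((n - 1) - abs(i - pad))

assert [reflect(i) for i in range(6)] == [1, 0, 1, 2, 3, 2]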
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6 % 6
    x2 = xindex // 36
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
        xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + x3, tmp9, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0,
            576, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_elu_1[grid(256)](buf2, primals_3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0, buf2


class Conv3x3(nn.Module):
    """Layer to pad and convolve input
    """

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, x):
        out = self.pad(x)
        out = self.conv(out)
        return out


class ConvBlockNew(nn.Module):
    """Layer to perform a convolution followed by ELU
    """

    def __init__(self, in_channels, out_channels):
        super(ConvBlockNew, self).__init__()
        self.conv = Conv3x3(in_channels, out_channels)
        self.nonlin = nn.ELU(inplace=True)

    def forward(self, input_0):
        primals_2 = self.conv.conv.weight
        primals_3 = self.conv.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
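A hedged equivalence check (CUDA assumed, names mine): ConvBlockNew should match an eager reflection-pad -> 3x3 conv -> ELU pipeline built from the same weights:

import torch
import torch.nn.functional as F

block = ConvBlockNew(in_channels=4, out_channels=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    ref = F.elu(F.conv2d(F.pad(x, (1, 1, 1, 1), mode='reflect'),
        block.conv.conv.weight, block.conv.conv.bias))
    assert torch.allclose(block(x), ref, atol=1e-5)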
Sid1057/sid1057.github.io
ConvBlock
false
17936
[ "MIT" ]
4
623d1731e308b42b6f86304dcfd671a061b414bf
https://github.com/Sid1057/sid1057.github.io/tree/623d1731e308b42b6f86304dcfd671a061b414bf
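A minimal smoke test for the ConvBlock row above, not part of the dataset itself: it assumes the ConvBlockNew and Conv3x3 definitions from the optimised field are in scope and that a CUDA device is available (the generated call() pins device 0). The eager reference is rebuilt inline from the wrapper's own weights rather than from the original ConvBlock class, which is not reproduced here.

import torch
import torch.nn as nn

# Run the compiled wrapper on an input matching the shape asserted in call().
block = ConvBlockNew(4, 4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = block(x)
assert y.shape == (4, 4, 4, 4)  # reflection pad 1 + 3x3 conv preserves H and W

# Eager reference: pad -> conv -> ELU(alpha=1) from the same parameters, which
# is exactly the pipeline that triton_poi_fused_convolution_elu_1 fuses.
ref = nn.ELU()(block.conv.conv(nn.ReflectionPad2d(1)(x)))
assert torch.allclose(y, ref, atol=1e-5)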
LayerScaleBlock
import torch import torch.nn as nn def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class TalkingHeadAttn(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_l = nn.Linear(num_heads, num_heads) self.proj_w = nn.Linear(num_heads, num_heads) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = attn.softmax(dim=-1) attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScaleBlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=nn.LayerNorm, attn_block=TalkingHeadAttn, mlp_block=Mlp, init_values=0.0001): super().__init__() self.norm1 = norm_layer(dim) self.attn = attn_block(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = mlp_block(in_features=dim, hidden_features= mlp_hidden_dim, act_layer=act_layer, drop=drop) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True) self.gamma_2 = 
nn.Parameter(init_values * torch.ones(dim), requires_grad=True) def forward(self, x): y = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) z = y + self.drop_path(self.gamma_2 * self.mlp(self.norm2(y))) return y, z def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_mul_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tl.store(out_ptr0 + x2, tmp11, xmask) tl.store(out_ptr1 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp6 = tl.load(in_ptr3 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x2, xmask) tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = tmp1 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_gelu_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x2, xmask) tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (12, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16,), (1,)) assert_size_stride(primals_17, (4, 16), (16, 1)) 
assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_4, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_4, buf0, buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_mul_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK =4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(64, 4)](buf6, buf7, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (64, 4), (4, 1), 0) del buf6 extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32) triton_poi_fused__softmax_5[grid(64)](buf8, primals_7, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_6[grid(256)](buf8, primals_7, buf9, buf10, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 16)](buf12, primals_9, buf13, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_9 buf14 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf9 triton_poi_fused_clone_8[grid(16, 4)](buf3, buf14, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf15 = reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 1), 0) del buf10 extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_9[grid(16, 4)](buf15, buf16, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf17 = reinterpret_tensor(buf15, (16, 4), (4, 1), 0) del buf15 extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_10[grid(64)](primals_4, primals_1, buf17, primals_11, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = buf1 del buf1 buf20 = buf0 del buf0 
triton_poi_fused_native_layer_norm_0[grid(16)](buf18, buf19, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf18, buf19, buf20, primals_13, primals_14, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf19 del buf20 del primals_14 buf22 = reinterpret_tensor(buf12, (16, 16), (16, 1), 0) del buf12 extern_kernels.addmm(primals_16, reinterpret_tensor(buf21, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf22) del primals_16 buf23 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_gelu_11[grid(256)](buf22, buf23, 256, XBLOCK=256, num_warps=4, num_stages=1) buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_18, reinterpret_tensor(buf23, (16, 16), (16, 1), 0), reinterpret_tensor(primals_17, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf24) del primals_18 buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_12[grid(64)](buf18, primals_12, buf24, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) return (buf18, buf25, primals_1, primals_4, primals_7, primals_11, primals_12, primals_13, reinterpret_tensor(buf2, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor( buf16, (16, 4), (4, 1), 0), buf17, buf18, reinterpret_tensor(buf21, (16, 4), (4, 1), 0), buf22, reinterpret_tensor(buf23, (16, 16), (16, 1), 0), buf24, primals_17, primals_15, primals_10, reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf14, (16, 1, 4), (4, 1, 1), 0), primals_8, primals_6, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_5) def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class TalkingHeadAttn(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_l = nn.Linear(num_heads, num_heads) self.proj_w = nn.Linear(num_heads, num_heads) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = attn.softmax(dim=-1) attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScaleBlockNew(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=nn.LayerNorm, attn_block=TalkingHeadAttn, mlp_block=Mlp, init_values=0.0001): super().__init__() self.norm1 = norm_layer(dim) self.attn = attn_block(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = mlp_block(in_features=dim, hidden_features= mlp_hidden_dim, act_layer=act_layer, drop=drop) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True) def forward(self, input_0): primals_1 = self.gamma_1 primals_2 = self.gamma_2 primals_3 = self.norm1.weight primals_7 = self.norm1.bias primals_5 = self.attn.qkv.weight primals_6 = self.attn.proj.weight primals_9 = self.attn.proj.bias primals_8 = self.attn.proj_l.weight primals_11 = self.attn.proj_l.bias primals_10 = self.attn.proj_w.weight primals_12 = self.attn.proj_w.bias primals_13 = self.norm2.weight primals_14 = self.norm2.bias primals_15 = self.mlp.fc1.weight primals_16 = self.mlp.fc1.bias primals_17 = self.mlp.fc2.weight primals_18 = self.mlp.fc2.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0], output[1]
yifanc96/yifanc-DL
LayerScaleBlock
false
11101
[ "MIT" ]
0
25d56cec776fb151c8f6bcbd997bca94f07f3597
https://github.com/yifanc96/yifanc-DL/tree/25d56cec776fb151c8f6bcbd997bca94f07f3597
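A short, hypothetical smoke test for the LayerScaleBlock row, assuming LayerScaleBlockNew from the optimised field is importable and CUDA is available. The shapes follow get_inputs() and get_init_inputs() above.

import torch

blk = LayerScaleBlockNew(dim=4, num_heads=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
# The wrapper returns both residual stages (y, z), like the eager forward.
y, z = blk(x)
assert y.shape == (4, 4, 4) and z.shape == (4, 4, 4)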
JaccardLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3u/c3u5ymm2y7v72mwkeem6ekyshydqrrvo7wiykmctvcwkoun4ir7s.py # Topologically Sorted Source Nodes: [mul, intersection, add, add_1, sum_3, sub, add_2, truediv, losses, mean], Original ATen: [aten.mul, aten.sum, aten.add, aten.sub, aten.div, aten.rsub, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # intersection => sum_2 # losses => sub_1 # mean => mean # mul => mul # sub => sub # sum_3 => sum_3 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view), kwargs = {}) # %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 0.001), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, %sum_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 0.001), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) triton_per_fused_add_div_mean_mul_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 
'*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tmp0 + tmp1 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 0.001 tmp11 = tmp5 + tmp10 tmp12 = tmp9 - tmp5 tmp13 = tmp12 + tmp10 tmp14 = tmp11 / tmp13 tmp15 = 1.0 tmp16 = tmp15 - tmp14 tmp17 = tmp16 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, ), (1, ), torch.float32) buf2 = reinterpret_tensor(buf0, (), (), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul, intersection, add, add_1, sum_3, sub, add_2, truediv, losses, mean], Original ATen: [aten.mul, aten.sum, aten.add, aten.sub, aten.div, aten.rsub, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_div_mean_mul_rsub_sub_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_mul_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tmp0 + tmp1 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 0.001 tmp11 = tmp5 + tmp10 tmp12 = tmp9 - tmp5 tmp13 = tmp12 + tmp10 tmp14 = tmp11 / tmp13 tmp15 = 1.0 tmp16 = tmp15 - tmp14 tmp17 = tmp16 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1,), (1,), torch.float32) buf2 = reinterpret_tensor(buf0, (), (), 0) del buf0 get_raw_stream(0) triton_per_fused_add_div_mean_mul_rsub_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, def jaccard(outputs, targets, per_image=False, non_empty=False, min_pixels=5): batch_size = outputs.size()[0] eps = 0.001 if not per_image: batch_size = 1 dice_target = targets.contiguous().view(batch_size, -1).float() dice_output = outputs.contiguous().view(batch_size, -1) target_sum = torch.sum(dice_target, dim=1) intersection = torch.sum(dice_output * dice_target, dim=1) losses = 1 - (intersection + eps) / (torch.sum(dice_output + dice_target, dim=1) - intersection + eps) if non_empty: assert per_image is True non_empty_images = 0 sum_loss = 0 for i in range(batch_size): if target_sum[i] > min_pixels: sum_loss += losses[i] non_empty_images += 1 if non_empty_images == 0: return 0 else: return sum_loss / non_empty_images return losses.mean() class JaccardLossNew(nn.Module): def __init__(self, weight=None, size_average=True, per_image=False, non_empty=False, apply_sigmoid=False, min_pixels=5): super().__init__() self.size_average = size_average self.register_buffer('weight', weight) self.per_image = per_image self.non_empty = non_empty self.apply_sigmoid = apply_sigmoid self.min_pixels = min_pixels def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ktncktnc/SpaceNet_Off_Nadir_Solutions
JaccardLoss
false
15860
[ "Apache-2.0" ]
164
2a9ef1c3b72fb749c808ddb8593a85cb16b9f1ca
https://github.com/ktncktnc/SpaceNet_Off_Nadir_Solutions/tree/2a9ef1c3b72fb749c808ddb8593a85cb16b9f1ca
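A hypothetical numerical check for the JaccardLoss row, assuming JaccardLossNew is in scope and CUDA is available. The fused kernel hard-codes the per_image=False path with eps = 0.001 and a single 256-element reduction, so the reference below is just that closed-form expression.

import torch

loss_fn = JaccardLossNew().cuda()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
fused = loss_fn(a, b)

# 1 - (intersection + eps) / (union + eps), union = sum(a + b) - intersection.
eps = 0.001
inter = (a * b).sum()
ref = 1 - (inter + eps) / ((a + b).sum() - inter + eps)
assert torch.allclose(fused, ref, atol=1e-5)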
TwoMLPHead
import torch from torch import nn import torch.nn.functional as F class TwoMLPHead(nn.Module): """ Standard heads for FPN-based models Arguments: in_channels (int): number of input channels representation_size (int): size of the intermediate representation """ def __init__(self, in_channels, representation_size): super(TwoMLPHead, self).__init__() self.fc6 = nn.Linear(in_channels, representation_size) self.fc7 = nn.Linear(representation_size, representation_size) def forward(self, x): x = x.flatten(start_dim=1) x = F.relu(self.fc6(x)) x = F.relu(self.fc7(x)) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'representation_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf3, primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, buf1, buf4, primals_4 class TwoMLPHeadNew(nn.Module): """ Standard heads for FPN-based models Arguments: in_channels (int): number of input channels representation_size (int): size of the intermediate representation """ def __init__(self, in_channels, representation_size): super(TwoMLPHeadNew, self).__init__() self.fc6 = nn.Linear(in_channels, representation_size) self.fc7 = nn.Linear(representation_size, representation_size) def forward(self, input_0): primals_1 = self.fc6.weight primals_3 = self.fc6.bias primals_2 = self.fc7.weight primals_5 = self.fc7.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
GreenCUBIC/GasBotty
TwoMLPHead
false
9528
[ "MIT" ]
0
158f5991201c80bf4cbbbb9deabc9954ff19bbb1
https://github.com/GreenCUBIC/GasBotty/tree/158f5991201c80bf4cbbbb9deabc9954ff19bbb1
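A hypothetical equivalence check for the TwoMLPHead row, assuming both class definitions above are importable and CUDA is available. Because the compiled wrapper keeps the eager module's fc6/fc7 submodule names, state dicts transfer directly.

import torch

eager = TwoMLPHead(4, 4).cuda()
fused = TwoMLPHeadNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # fc6/fc7 parameter keys match exactly

x = torch.rand(4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)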
CosineBasisLinear
import torch import numpy as np from torch import nn def cosine_basis_functions(x, n_basis_functions=64): """Cosine basis functions used to embed quantile thresholds. Args: x (torch.Tensor): Input. n_basis_functions (int): Number of cosine basis functions. Returns: ndarray: Embedding with shape of (x.shape + (n_basis_functions,)). """ i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float, device =x.device) * np.pi embedding = torch.cos(x[..., None] * i_pi) assert embedding.shape == x.shape + (n_basis_functions,) return embedding class CosineBasisLinear(nn.Module): """Linear layer following cosine basis functions. Args: n_basis_functions (int): Number of cosine basis functions. out_size (int): Output size. """ def __init__(self, n_basis_functions, out_size): super().__init__() self.linear = nn.Linear(n_basis_functions, out_size) self.n_basis_functions = n_basis_functions self.out_size = out_size def forward(self, x): """Evaluate. Args: x (torch.Tensor): Input. Returns: torch.Tensor: Output with shape of (x.shape + (out_size,)). """ h = cosine_basis_functions(x, self.n_basis_functions) h = h.reshape(-1, self.n_basis_functions) out = self.linear(h) out = out.reshape(*x.shape, self.out_size) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_basis_functions': 4, 'out_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_arange_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = 1 + x0 tmp2 = tmp1.to(tl.float32) tmp3 = 3.141592653589793 tmp4 = tmp2 * tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl_math.cos(tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_arange_cos_mul_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((256, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (256, 4), (4, 1), 0) def cosine_basis_functions(x, n_basis_functions=64): """Cosine basis functions used to embed quantile thresholds. Args: x (torch.Tensor): Input. n_basis_functions (int): Number of cosine basis functions. Returns: ndarray: Embedding with shape of (x.shape + (n_basis_functions,)). """ i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float, device =x.device) * np.pi embedding = torch.cos(x[..., None] * i_pi) assert embedding.shape == x.shape + (n_basis_functions,) return embedding class CosineBasisLinearNew(nn.Module): """Linear layer following cosine basis functions. Args: n_basis_functions (int): Number of cosine basis functions. out_size (int): Output size. """ def __init__(self, n_basis_functions, out_size): super().__init__() self.linear = nn.Linear(n_basis_functions, out_size) self.n_basis_functions = n_basis_functions self.out_size = out_size def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
g-votte/pfrl
CosineBasisLinear
false
15381
[ "MIT" ]
824
4c30c1d73f0941a2b649b62937eec346bb55a95e
https://github.com/g-votte/pfrl/tree/4c30c1d73f0941a2b649b62937eec346bb55a95e
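A hypothetical check for the CosineBasisLinear row, assuming both the eager class and CosineBasisLinearNew are in scope and CUDA is available. The fused path folds the cos(i * pi * x) embedding into triton_poi_fused_arange_cos_mul_0 and then runs a single addmm.

import torch

eager = CosineBasisLinear(4, 4).cuda()
fused = CosineBasisLinearNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # single 'linear' submodule on both sides

x = torch.rand(4, 4, 4, 4, device='cuda')
out = fused(x)
assert out.shape == (4, 4, 4, 4, 4)  # x.shape + (out_size,)
assert torch.allclose(eager(x), out, atol=1e-5)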
TwoLayerCNN
import torch import torch.nn as nn import torch.nn.functional as F class TwoLayerCNN(nn.Module): def __init__(self, C, M, embedding, channel, mtc_input, *args, **kwargs): super(TwoLayerCNN, self).__init__() self.C = C self.M = M self.embedding = embedding self.mtc_input = C if mtc_input else 1 self.conv1 = nn.Conv1d(self.mtc_input, channel, 3, 1, padding=1, bias=False) self.flat_size = M // 2 * C // self.mtc_input * channel self.fc1 = nn.Linear(self.flat_size, embedding) def forward(self, x): N = len(x) x = x.view(-1, self.mtc_input, self.M) x = F.relu(self.conv1(x)) x = F.max_pool1d(x, 2) x = x.view(N, self.flat_size) x = self.fc1(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'C': 4, 'M': 4, 'embedding': 4, 'channel': 4, 'mtc_input': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr1 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.int8) buf3 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_1[grid(32)](buf1, buf2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (4, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha =1, beta=1, out=buf4) del primals_4 return buf4, primals_2, primals_1, reinterpret_tensor(buf1, (4, 4, 1, 4 ), (16, 4, 4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 8), (8, 1), 0 ), primals_3, buf5 class TwoLayerCNNNew(nn.Module): def __init__(self, C, M, embedding, channel, mtc_input, *args, **kwargs): super(TwoLayerCNNNew, self).__init__() self.C = C self.M = M self.embedding = embedding self.mtc_input = C if mtc_input else 1 self.conv1 = nn.Conv1d(self.mtc_input, channel, 3, 1, padding=1, bias=False) self.flat_size = M // 2 * C // self.mtc_input * channel self.fc1 = nn.Linear(self.flat_size, embedding) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_1 = input_0 output = call([primals_1, 
primals_2, primals_3, primals_4]) return output[0]
imvladikon/string-embed
TwoLayerCNN
false
3669
[ "MIT" ]
0
49e5ab0ada37b497dac51974aff16eeac65627a0
https://github.com/imvladikon/string-embed/tree/49e5ab0ada37b497dac51974aff16eeac65627a0
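A hypothetical equivalence check for the TwoLayerCNN row, assuming both classes are importable and CUDA is available. With mtc_input=4 (truthy), the eager forward reshapes to (-1, 4, 4) and flat_size = M // 2 * C // mtc_input * channel = 8, which matches the (4, 8) view fed to addmm in call().

import torch

kwargs = dict(C=4, M=4, embedding=4, channel=4, mtc_input=4)
eager = TwoLayerCNN(**kwargs).cuda()
fused = TwoLayerCNNNew(**kwargs).cuda()
fused.load_state_dict(eager.state_dict())  # conv1 (bias-free) and fc1 keys match

x = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)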
BasicNN
# AOT ID: ['1_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/wm/cwmlpspwkzmiv4efvkrjowkoawwpe42tnpzkiacrjumkzfjsy3qc.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.div] # Source node to ATen node mapping: # x_1 => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, 255.0), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/sp/cspbvdtjzgrogiqrdvxgtnc5djfqn22gfjy4t6u5773zfamx7rey.py 
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div_1, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 2], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (2, 784), (784, 1)) assert_size_stride(primals_3, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 28, 28), (784, 1, 28, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 
784, grid=grid(784), stream=stream0) del primals_1 buf1 = empty_strided_cuda((1, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (1, 784), (0, 1), 0), reinterpret_tensor(primals_2, (784, 2), (1, 784), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf4 = empty_strided_cuda((1, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf1, buf4, 1, 2, grid=grid(1), stream=stream0) del buf1 return (buf4, reinterpret_tensor(buf0, (1, 784), (784, 1), 0), buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (2, 784), (784, 1)) assert_size_stride(primals_3, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 28, 28), (784, 1, 28, 1), torch. float32) get_raw_stream(0) triton_poi_fused_div_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((1, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (1, 784), (0, 1), 0), reinterpret_tensor(primals_2, (784, 2), (1, 784), 0 ), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf4 = empty_strided_cuda((1, 2), (2, 1), torch.float32) triton_per_fused__softmax_1[grid(1)](buf1, buf4, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) del buf1 return buf4, reinterpret_tensor(buf0, (1, 784), (784, 1), 0), buf4 class BasicNNNew(nn.Module): def __init__(self): super(BasicNNNew, self).__init__() self.net = nn.Linear(28 * 28, 2) def forward(self, input_0): primals_2 = self.net.weight primals_3 = self.net.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dbczumar/clipper
BasicNN
false
3,401
[ "Apache-2.0" ]
0
80c97d27a38d60caaebb2a1ae6a995dd7ff1c82d
https://github.com/dbczumar/clipper/tree/80c97d27a38d60caaebb2a1ae6a995dd7ff1c82d
JointBoneLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/fm/cfm6a6dnefwe7wmwtjo6xtq4yv4kexu4kskkj242y6hsuzo4qect.py
# Topologically Sorted Source Nodes: [getitem, getitem_1, sub, J, getitem_2, getitem_3, sub_1, Y, sub_2, loss, sum_1, truediv, loss_1], Original ATen: [aten.index, aten.sub, aten.linalg_vector_norm, aten.abs, aten.sum, aten.div]
# Source node to ATen node mapping:
#   J => pow_1, pow_2, sum_1
#   Y => pow_3, pow_4, sum_2
#   getitem => index
#   getitem_1 => index_1
#   getitem_2 => index_2
#   getitem_3 => index_3
#   loss => abs_1
#   loss_1 => div_1
#   sub => sub
#   sub_1 => sub_1
#   sub_2 => sub_2
#   sum_1 => sum_3
#   truediv => div
# Graph fragment:
#   %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy]), kwargs = {})
#   %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy_1]), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%index, %index_1), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
#   %index_2 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [None, %lift_fresh_copy_2]), kwargs = {})
#   %index_3 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [None, %lift_fresh_copy_3]), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%index_2, %index_3), kwargs = {})
#   %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1]), kwargs = {})
#   %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %pow_4), kwargs = {})
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
#   %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 6), kwargs = {})
triton_red_fused_abs_div_index_linalg_vector_norm_sub_sum_0 = async_compile.triton('triton_red_fused_abs_div_index_linalg_vector_norm_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[1, 128],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_abs_div_index_linalg_vector_norm_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_abs_div_index_linalg_vector_norm_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 96
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = (rindex // 4) % 6
        r0 = rindex % 4
        r2 = (rindex // 24)
        r3 = rindex
        tmp0 = r1
        tmp1 = tl.full([1, 1], 3, tl.int64)
        tmp2 = tmp0 < tmp1
        tmp3 = tl.full([1, 1], 1, tl.int64)
        tmp4 = tmp0 < tmp3
        tmp5 = tl.full([1, 1], 2, tl.int64)
        tmp6 = tmp0 < tmp5
        tmp7 = tl.full([1, 1], 0, tl.int64)
        tmp8 = tl.where(tmp6, tmp7, tmp7)
        tmp9 = tl.where(tmp4, tmp7, tmp8)
        tmp10 = tl.full([1, 1], 4, tl.int64)
        tmp11 = tmp0 < tmp10
        tmp12 = tl.full([1, 1], 5, tl.int64)
        tmp13 = tmp0 < tmp12
        tmp14 = tl.where(tmp13, tmp3, tmp5)
        tmp15 = tl.where(tmp11, tmp3, tmp14)
        tmp16 = tl.where(tmp2, tmp9, tmp15)
        tmp17 = tl.load(in_ptr0 + ((4*r0) + (16*tmp16) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp18 = tl.where(tmp6, tmp5, tmp1)
        tmp19 = tl.where(tmp4, tmp3, tmp18)
        tmp20 = tl.where(tmp13, tmp1, tmp1)
        tmp21 = tl.where(tmp11, tmp5, tmp20)
        tmp22 = tl.where(tmp2, tmp19, tmp21)
        tmp23 = tl.load(in_ptr0 + ((4*r0) + (16*tmp22) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp24 = tmp17 - tmp23
        tmp25 = tmp24 * tmp24
        tmp26 = tl.load(in_ptr0 + (1 + (4*r0) + (16*tmp16) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp27 = tl.load(in_ptr0 + (1 + (4*r0) + (16*tmp22) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp28 = tmp26 - tmp27
        tmp29 = tmp28 * tmp28
        tmp30 = tmp25 + tmp29
        tmp31 = tl.load(in_ptr0 + (2 + (4*r0) + (16*tmp16) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp32 = tl.load(in_ptr0 + (2 + (4*r0) + (16*tmp22) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp33 = tmp31 - tmp32
        tmp34 = tmp33 * tmp33
        tmp35 = tmp30 + tmp34
        tmp36 = tl.load(in_ptr0 + (3 + (4*r0) + (16*tmp16) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp37 = tl.load(in_ptr0 + (3 + (4*r0) + (16*tmp22) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp38 = tmp36 - tmp37
        tmp39 = tmp38 * tmp38
        tmp40 = tmp35 + tmp39
        tmp41 = tl.load(in_ptr1 + ((4*r0) + (16*tmp16) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp42 = tl.load(in_ptr1 + ((4*r0) + (16*tmp22) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp43 = tmp41 - tmp42
        tmp44 = tmp43 * tmp43
        tmp45 = tl.load(in_ptr1 + (1 + (4*r0) + (16*tmp16) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp46 = tl.load(in_ptr1 + (1 + (4*r0) + (16*tmp22) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp47 = tmp45 - tmp46
        tmp48 = tmp47 * tmp47
        tmp49 = tmp44 + tmp48
        tmp50 = tl.load(in_ptr1 + (2 + (4*r0) + (16*tmp16) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp51 = tl.load(in_ptr1 + (2 + (4*r0) + (16*tmp22) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp52 = tmp50 - tmp51
        tmp53 = tmp52 * tmp52
        tmp54 = tmp49 + tmp53
        tmp55 = tl.load(in_ptr1 + (3 + (4*r0) + (16*tmp16) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp56 = tl.load(in_ptr1 + (3 + (4*r0) + (16*tmp22) + (64*r2)), rmask, eviction_policy='evict_last', other=0.0)
        tmp57 = tmp55 - tmp56
        tmp58 = tmp57 * tmp57
        tmp59 = tmp54 + tmp58
        tmp60 = libdevice.sqrt(tmp40)
        tmp61 = libdevice.sqrt(tmp59)
        tmp62 = tmp60 - tmp61
        tmp63 = tl_math.abs(tmp62)
        tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
        tmp66 = _tmp65 + tmp64
        _tmp65 = tl.where(rmask, tmp66, _tmp65)
    tmp65 = tl.sum(_tmp65, 1)[:, None]
    tmp67 = 0.25
    tmp68 = tmp65 * tmp67
    tmp69 = 0.16666666666666666
    tmp70 = tmp68 * tmp69
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp70, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [getitem, getitem_1, sub, J, getitem_2, getitem_3, sub_1, Y, sub_2, loss, sum_1, truediv, loss_1], Original ATen: [aten.index, aten.sub, aten.linalg_vector_norm, aten.abs, aten.sum, aten.div]
        stream0 = get_raw_stream(0)
        triton_red_fused_abs_div_index_linalg_vector_norm_sub_sum_0.run(buf3, arg0_1, arg1_1, 1, 96, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf3, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_red_fused_abs_div_index_linalg_vector_norm_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 96
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex // 4 % 6
        r0 = rindex % 4
        r2 = rindex // 24
        tmp0 = r1
        tmp1 = tl.full([1, 1], 3, tl.int64)
        tmp2 = tmp0 < tmp1
        tmp3 = tl.full([1, 1], 1, tl.int64)
        tmp4 = tmp0 < tmp3
        tmp5 = tl.full([1, 1], 2, tl.int64)
        tmp6 = tmp0 < tmp5
        tmp7 = tl.full([1, 1], 0, tl.int64)
        tmp8 = tl.where(tmp6, tmp7, tmp7)
        tmp9 = tl.where(tmp4, tmp7, tmp8)
        tmp10 = tl.full([1, 1], 4, tl.int64)
        tmp11 = tmp0 < tmp10
        tmp12 = tl.full([1, 1], 5, tl.int64)
        tmp13 = tmp0 < tmp12
        tmp14 = tl.where(tmp13, tmp3, tmp5)
        tmp15 = tl.where(tmp11, tmp3, tmp14)
        tmp16 = tl.where(tmp2, tmp9, tmp15)
        tmp17 = tl.load(in_ptr0 + (4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp18 = tl.where(tmp6, tmp5, tmp1)
        tmp19 = tl.where(tmp4, tmp3, tmp18)
        tmp20 = tl.where(tmp13, tmp1, tmp1)
        tmp21 = tl.where(tmp11, tmp5, tmp20)
        tmp22 = tl.where(tmp2, tmp19, tmp21)
        tmp23 = tl.load(in_ptr0 + (4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp24 = tmp17 - tmp23
        tmp25 = tmp24 * tmp24
        tmp26 = tl.load(in_ptr0 + (1 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp27 = tl.load(in_ptr0 + (1 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp28 = tmp26 - tmp27
        tmp29 = tmp28 * tmp28
        tmp30 = tmp25 + tmp29
        tmp31 = tl.load(in_ptr0 + (2 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp32 = tl.load(in_ptr0 + (2 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp33 = tmp31 - tmp32
        tmp34 = tmp33 * tmp33
        tmp35 = tmp30 + tmp34
        tmp36 = tl.load(in_ptr0 + (3 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp37 = tl.load(in_ptr0 + (3 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp38 = tmp36 - tmp37
        tmp39 = tmp38 * tmp38
        tmp40 = tmp35 + tmp39
        tmp41 = tl.load(in_ptr1 + (4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp42 = tl.load(in_ptr1 + (4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp43 = tmp41 - tmp42
        tmp44 = tmp43 * tmp43
        tmp45 = tl.load(in_ptr1 + (1 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp46 = tl.load(in_ptr1 + (1 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp47 = tmp45 - tmp46
        tmp48 = tmp47 * tmp47
        tmp49 = tmp44 + tmp48
        tmp50 = tl.load(in_ptr1 + (2 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp51 = tl.load(in_ptr1 + (2 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp52 = tmp50 - tmp51
        tmp53 = tmp52 * tmp52
        tmp54 = tmp49 + tmp53
        tmp55 = tl.load(in_ptr1 + (3 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp56 = tl.load(in_ptr1 + (3 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0)
        tmp57 = tmp55 - tmp56
        tmp58 = tmp57 * tmp57
        tmp59 = tmp54 + tmp58
        tmp60 = libdevice.sqrt(tmp40)
        tmp61 = libdevice.sqrt(tmp59)
        tmp62 = tmp60 - tmp61
        tmp63 = tl_math.abs(tmp62)
        tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
        tmp66 = _tmp65 + tmp64
        _tmp65 = tl.where(rmask, tmp66, _tmp65)
    tmp65 = tl.sum(_tmp65, 1)[:, None]
    tmp67 = 0.25
    tmp68 = tmp65 * tmp67
    tmp69 = 0.16666666666666666
    tmp70 = tmp68 * tmp69
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp70, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf2
        del buf2
        get_raw_stream(0)
        triton_red_fused_abs_div_index_linalg_vector_norm_sub_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 96, XBLOCK=1, RBLOCK=64, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf3,


class JointBoneLossNew(torch.nn.Module):

    def __init__(self, joint_num):
        super(JointBoneLossNew, self).__init__()
        id_i, id_j = [], []
        for i in range(joint_num):
            for j in range(i + 1, joint_num):
                id_i.append(i)
                id_j.append(j)
        self.id_i = id_i
        self.id_j = id_j

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
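# A minimal eager-mode sketch of the loss the reduction kernel above computes,
# assuming only what JointBoneLossNew encodes: for each joint pair
# (id_i[k], id_j[k]) compare predicted and ground-truth bone lengths, then
# average the absolute differences over dim 0 (size 4 here, hence the 0.25
# factor) and over the 6 pairs that joint_num=4 yields (the 1/6 factor). The
# helper name is illustrative.
def _joint_bone_loss_reference(joint_out, joint_gt, id_i, id_j):
    J = torch.norm(joint_out[:, id_i] - joint_out[:, id_j], dim=-1)  # predicted bone lengths
    Y = torch.norm(joint_gt[:, id_i] - joint_gt[:, id_j], dim=-1)    # ground-truth bone lengths
    loss = torch.abs(J - Y).sum()
    return loss / joint_out.shape[0] / len(id_i)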
SevenMoGod/movenet.pytorch
JointBoneLoss
false
14,395
[ "MIT" ]
87
95ec8535245228aa4335243e68722810e50bcaf8
https://github.com/SevenMoGod/movenet.pytorch/tree/95ec8535245228aa4335243e68722810e50bcaf8
Conv2d
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_8/inductor_cache/vw/cvwklowagvx5sksvhxwnsr3o6vulpztvjpqkljpmfcmgvcblg6xo.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   y => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 25) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_0.run(buf1, primals_2, 400, grid=grid(400), stream=stream0)
        del primals_2
    return (reinterpret_tensor(buf1, (4, 4, 4, 5), (100, 25, 5, 1), 0), primals_1, primals_3, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 25 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(400)](buf1, primals_2, 400, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return reinterpret_tensor(buf1, (4, 4, 4, 5), (100, 25, 5, 1), 0), primals_1, primals_3


class Conv2dNew(nn.Conv2d):
    """
    :param in_channels: Scalar
    :param out_channels: Scalar
    :param kernel_size: Scalar
    :param activation_fn: activation function
    :param drop_rate: Scalar. dropout rate
    :param stride: Scalar
    :param padding: padding type
    :param dilation: Scalar
    :param groups: Scalar.
    :param bias: Boolean.
    :param bn: Boolean. whether it uses batch normalization
    """

    def __init__(self, in_channels, out_channels, kernel_size, activation_fn=None, drop_rate=0.0, stride=1, padding='same', dilation=1, groups=1, bias=True, bn=False):
        self.activation_fn = activation_fn
        self.drop_rate = drop_rate
        if padding == 'same':
            padding = kernel_size // 2 * dilation
        self.even_kernel = not bool(kernel_size % 2)
        super(Conv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.drop_out = nn.Dropout(drop_rate) if drop_rate > 0 else None
        self.batch_norm = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.001) if bn else None

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
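# A small worked example of the 'same'-padding arithmetic used by Conv2dNew
# above, assuming only what the generated code shows: padding = kernel_size // 2
# * dilation = 2 for the 4x4 kernel, so the 4x4 input produces the 5x5 buffer
# asserted in call(), and the even-kernel case trims the result (here the
# wrapper returns a (4, 4, 4, 5) view of that buffer). The helper name is
# illustrative.
def _conv_same_output_size(h, kernel_size=4, dilation=1, stride=1):
    padding = kernel_size // 2 * dilation  # the 'same' branch in Conv2dNew.__init__
    return (h + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1
# _conv_same_output_size(4) == 5, matching assert_size_stride(buf0, (4, 4, 5, 5), ...).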
CookiePPP/mellotron
Conv2d
false
9,058
[ "BSD-3-Clause" ]
0
488425981c19cd0eddddea13d1348da4bfef8d26
https://github.com/CookiePPP/mellotron/tree/488425981c19cd0eddddea13d1348da4bfef8d26
Mish
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_6/inductor_cache/nf/cnfomfgkrddid5xfz2abcevz3avk7wtz7mughpoinpbdg3kjz27l.py
# Topologically Sorted Source Nodes: [softplus, tanh, mul_], Original ATen: [aten.softplus, aten.tanh, aten.mul]
# Source node to ATen node mapping:
#   mul_ => mul
#   softplus => exp, gt, log1p, where
#   tanh => tanh
# Graph fragment:
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
#   %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
#   %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %tanh), kwargs = {})
#   %copy_ : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %mul), kwargs = {})
triton_poi_fused_mul_softplus_tanh_0 = async_compile.triton('triton_poi_fused_mul_softplus_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_softplus_tanh_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tl.store(out_ptr1 + (x0), tmp7, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [softplus, tanh, mul_], Original ATen: [aten.softplus, aten.tanh, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_softplus_tanh_0.run(arg0_1, arg0_1, 256, grid=grid(256), stream=stream0)
    return (arg0_1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tl.store(out_ptr1 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        get_raw_stream(0)
        triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return arg0_1,


class MishNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
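# A minimal eager-mode sketch of the fused kernel above, assuming only the
# traced graph it embodies: in-place Mish, x * tanh(softplus(x)), where softplus
# falls back to x itself once x > 20 (the tmp1/tmp2 threshold in the kernel).
# The helper name is illustrative.
def _mish_reference(x):
    softplus = torch.where(x > 20.0, x, torch.log1p(torch.exp(x)))
    return x.mul_(torch.tanh(softplus))  # written back into x, like arg0_1 above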
csharpshooter/DeepLearning
Mish
false
1,747
[ "MIT" ]
0
c1d20660c32076468970f7376931e1fcd0d2644e
https://github.com/csharpshooter/DeepLearning/tree/c1d20660c32076468970f7376931e1fcd0d2644e
SphereLoss
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/54/c54anezm5tp7djzvtp7bcl42ylzihy5446xuvla3ymvra5m7c4ma.py
# Topologically Sorted Source Nodes: [x_norm_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
#   x_norm_1 => div
# Graph fragment:
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/52/c52poxxuyzi26vb6oxpqnooguyjqzr62bsuca6tf73qb4cnpqt7y.py
# Topologically Sorted Source Nodes: [w_norm_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
#   w_norm_1 => div_1
# Graph fragment:
#   %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/rh/crhl52hzepf4uwf2gepe3xefe3htjq2pvyzmbzyufavdxzhczb56.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
#   %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 1), kwargs = {})
#   %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
#   %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
#   %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 14), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 14.0
    tmp16 = tmp14 * tmp15
    tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/ia/ciaslwmz6eghlz44hmcrzjkybxoxqhu46c4ud6lpguuy7zetvqal.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
#   loss => div_2, exp, log, mul_1, neg, sub_1, sum_3, sum_4
# Graph fragment:
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %log), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_3), kwargs = {})
#   %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_4,), kwargs = {})
#   %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 4), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_3 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    r1 = (rindex // 4)
    tmp0 = tl.load(in_ptr0 + (r2), None)
    tmp1 = tl.load(in_ptr0 + (4*r1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*r1)), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + (4*r1)), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + (4*r1)), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (r2), None)
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tmp15 = tmp13 * tmp14
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.sum(tmp16, 1)[:, None]
    tmp19 = -tmp18
    tmp20 = 0.25
    tmp21 = tmp19 * tmp20
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_norm_1], Original ATen: [aten.div]
        stream0 = get_raw_stream(0)
        triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [w_norm_1], Original ATen: [aten.div]
        triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [w_norm_1, cos_th], Original ATen: [aten.div, aten.mm]
        extern_kernels.mm(buf0, buf1, out=buf2)
        buf3 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [], Original ATen: []
        triton_poi_fused_2.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
        triton_per_fused__log_softmax_div_mul_neg_sum_3.run(buf5, buf3, primals_3, 1, 16, grid=grid(1), stream=stream0)
        del buf3
    return (buf5, primals_2, primals_3, buf2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch.nn as nn
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 14.0
    tmp16 = tmp14 * tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)


@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    r1 = rindex // 4
    tmp0 = tl.load(in_ptr0 + r2, None)
    tmp1 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + r2, None)
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tmp15 = tmp13 * tmp14
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.sum(tmp16, 1)[:, None]
    tmp19 = -tmp18
    tmp20 = 0.25
    tmp21 = tmp19 * tmp20
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, buf1, out=buf2)
        buf3 = buf1
        del buf1
        triton_poi_fused_2[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        triton_per_fused__log_softmax_div_mul_neg_sum_3[grid(1)](buf5, buf3, primals_3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf3
    return buf5, primals_2, primals_3, buf2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0)


class SphereLossNew(nn.Module):

    def __init__(self, in_feats, n_classes, scale=14, *args, **kwargs):
        super(SphereLossNew, self).__init__(*args, **kwargs)
        self.scale = scale
        self.cross_entropy = nn.CrossEntropyLoss()
        self.W = torch.nn.Parameter(torch.randn(in_feats, n_classes), requires_grad=True)
        nn.init.xavier_normal_(self.W, gain=1)

    def forward(self, input_0, input_1):
        primals_1 = self.W
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0]
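# A minimal eager-mode sketch of the fused pipeline above, assuming only what
# the four kernels show: one operand is L2-normalized along dim 1 and the other
# along dim 0 (both with the 1e-12 floor), the cosine logits from the mm are
# scaled by 14 after a row-max subtraction (a numerically stable log-softmax),
# and the loss is a soft-target cross entropy divided by 4, the dim-0 size of
# the example inputs. The helper name is illustrative.
def _sphere_loss_reference(a, b, target, scale=14.0):
    a_norm = a / a.norm(dim=1, keepdim=True).clamp_min(1e-12)  # triton_poi_fused_div_0
    b_norm = b / b.norm(dim=0, keepdim=True).clamp_min(1e-12)  # triton_poi_fused_div_1
    logits = scale * a_norm.mm(b_norm)  # cos_th, scaled in triton_poi_fused_2
    return -(torch.log_softmax(logits, dim=1) * target).sum() / a.shape[0]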
ace19-dev/image-retrieval-pytorch
SphereLoss
false
18,236
[ "MIT" ]
9
19bd4ae5efea5b6184c345f693646bcd9a0fc8cf
https://github.com/ace19-dev/image-retrieval-pytorch/tree/19bd4ae5efea5b6184c345f693646bcd9a0fc8cf
GCN
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/d2/cd2onkfnnqqav3af2iyomd6l7n3rqdwwvxsyw3ac7zfntydugnbi.py
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   add => add
#   x => relu
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_4), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/nd/cndmfvh7lrojuqo776y2t4r3iwqonobmuxwcqkx2pam6myjioeyl.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   x_3 => add_2
# Graph fragment:
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %primals_8), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, ), (1, ))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), primals_2, out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
        buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1  # reuse
        buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_relu_threshold_backward_0.run(buf2, primals_4, buf10, 256, grid=grid(256), stream=stream0)
        del primals_4
        buf3 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_5, out=buf3)
        buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4  # reuse
        buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [add_1, x_2], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
        triton_poi_fused_add_relu_threshold_backward_0.run(buf5, primals_6, buf9, 256, grid=grid(256), stream=stream0)
        del primals_6
        buf6 = buf3; del buf3  # reuse
        # Topologically Sorted Source Nodes: [support_2], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), primals_7, out=buf6)
        buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
        del buf6
        buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf7  # reuse
        # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.add]
        triton_poi_fused_add_1.run(buf8, primals_8, 256, grid=grid(256), stream=stream0)
        del primals_8
    return (reinterpret_tensor(buf8, (4, 64), (64, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (4, 64), (1, 4), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), buf9, reinterpret_tensor(buf2, (4, 64), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), buf10, reinterpret_tensor(primals_3, (4, 64), (1, 4), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((4, ), (1, ), device='cuda:0',
dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import math from torch.nn.modules import Module from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf2, primals_4, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_5, out=buf3) buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf5, primals_6, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf6 = buf3 del buf3 extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), primals_7, out=buf6) buf7 = empty_strided_cuda((16, 4, 4), 
(16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7) del buf6 buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 triton_poi_fused_add_1[grid(256)](buf8, primals_8, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 return reinterpret_tensor(buf8, (4, 64), (64, 1), 0), reinterpret_tensor( primals_1, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (4, 64), (1, 4), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0 ), buf9, reinterpret_tensor(buf2, (4, 64), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), buf10, reinterpret_tensor(primals_3, (4, 64), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 Z = f(X, A) = softmax(A` * ReLU(A` * X * W0)* W1) A` = D'^(-0.5) * A * D'^(-0.5) """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.matmul(input, self.weight) output = torch.matmul(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCNNew(Module): """ simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, nfeat, dropout): super(GCNNew, self).__init__() self.gc1 = GraphConvolution(nfeat, nfeat) self.gc2 = GraphConvolution(nfeat, nfeat) self.gc3 = GraphConvolution(nfeat, nfeat) self.dropout = dropout def forward(self, input_0, input_1): primals_2 = self.gc1.weight primals_4 = self.gc1.bias primals_5 = self.gc2.weight primals_6 = self.gc2.bias primals_7 = self.gc3.weight primals_8 = self.gc3.bias primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
Brain03Yao/M2TGCN
GCN
false
17,008
[ "MIT" ]
6
72c65687fa52c618740cd6d1db7366116f68398c
https://github.com/Brain03Yao/M2TGCN/tree/72c65687fa52c618740cd6d1db7366116f68398c
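A minimal usage sketch for the row above, not part of the original record: it assumes a CUDA device and that GCNNew from the optimised code field is importable. Reading call(), input_0 binds to primals_1 (the operand of the three bmm calls, i.e. the adjacency side) and input_1 to primals_3 (the features multiplied by the layer weights).

import torch

model = GCNNew(nfeat=4, dropout=0.5).cuda()
adj = torch.rand(4, 4, 4, 4, device='cuda')    # primals_1: consumed by the bmm calls
feats = torch.rand(4, 4, 4, 4, device='cuda')  # primals_3: fed into the gc1.weight mm
out = model(adj, feats)
print(out.shape)  # torch.Size([4, 64]); buf8 is reinterpreted from (4, 4, 4, 4)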
Fusion
import torch
import torch.nn as nn
import torch.utils.checkpoint


class Fusion(nn.Module):
    """
    The subnetwork that is used in TFN for video and audio in the pre-fusion stage
    """

    def __init__(self, in_size, hidden_size, n_class, dropout, modal_name='text'):
        """
        Args:
            in_size: input dimension
            hidden_size: hidden layer dimension
            dropout: dropout probability
        Output:
            (return value in forward) the hidden representation y_2 of shape
            (batch_size, hidden_size) and the class scores y_3 of shape
            (batch_size, n_class)
        """
        super(Fusion, self).__init__()
        self.drop = nn.Dropout(p=dropout)
        self.linear_1 = nn.Linear(in_size, hidden_size)
        self.linear_2 = nn.Linear(hidden_size, hidden_size)
        self.linear_3 = nn.Linear(hidden_size, n_class)

    def forward(self, x):
        """
        Args:
            x: tensor of shape (batch_size, in_size)
        """
        dropped = self.drop(x)
        y_1 = torch.tanh(self.linear_1(dropped))
        y_2 = torch.tanh(self.linear_2(y_1))
        y_3 = self.linear_3(y_2)
        return y_2, y_3


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_size': 4, 'hidden_size': 4, 'n_class': 4, 'dropout': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.checkpoint

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # y_1 = tanh(linear_1(x)): extern mm, then fused bias-add + tanh in place
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_3
        # y_2 = tanh(linear_2(y_1)): same fused kernel reused
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_5
        # y_3 = linear_3(y_2): bias folded into the addmm
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
    return (buf3, reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3,
        primals_6, primals_4)


class FusionNew(nn.Module):
    """
    The subnetwork that is used in TFN for video and audio in the pre-fusion stage
    """

    def __init__(self, in_size, hidden_size, n_class, dropout, modal_name='text'):
        """
        Args:
            in_size: input dimension
            hidden_size: hidden layer dimension
            dropout: dropout probability
        Output:
            (return value in forward) the hidden representation y_2 of shape
            (batch_size, hidden_size) and the class scores y_3 of shape
            (batch_size, n_class)
        """
        super(FusionNew, self).__init__()
        self.drop = nn.Dropout(p=dropout)
        self.linear_1 = nn.Linear(in_size, hidden_size)
        self.linear_2 = nn.Linear(hidden_size, hidden_size)
        self.linear_3 = nn.Linear(hidden_size, n_class)

    def forward(self, input_0):
        primals_2 = self.linear_1.weight
        primals_3 = self.linear_1.bias
        primals_4 = self.linear_2.weight
        primals_5 = self.linear_2.bias
        primals_6 = self.linear_3.weight
        primals_7 = self.linear_3.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1]
dumpmemory/MMSA
Fusion
false
6,613
[ "MIT" ]
1
08b3a7f4529c380356eeb1cf6bf9a89e7c9701e7
https://github.com/dumpmemory/MMSA/tree/08b3a7f4529c380356eeb1cf6bf9a89e7c9701e7
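A hedged equivalence check for this row (assumptions: a CUDA device, and both Fusion and FusionNew from the fields above in scope). The compiled graph was traced without dropout, so compare in eval() mode, where the p=0.5 dropout is the identity:

import torch

torch.manual_seed(0)
eager = Fusion(in_size=4, hidden_size=4, n_class=4, dropout=0.5).cuda().eval()
fused = FusionNew(in_size=4, hidden_size=4, n_class=4, dropout=0.5).cuda().eval()
fused.load_state_dict(eager.state_dict())  # submodule names match one-to-one
x = torch.rand(4, 4, 4, 4, device='cuda')
y2_ref, y3_ref = eager(x)
y2, y3 = fused(x)
assert torch.allclose(y2, y2_ref, atol=1e-6)
assert torch.allclose(y3, y3_ref, atol=1e-6)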
Net
import torch
import torch.nn as nn
import torch.nn.functional


class Net(nn.Module):

    def __init__(self, num_inputs=784, num_outputs=10, num_hiddens=256,
            is_training=True):
        super(Net, self).__init__()
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.num_hiddens = num_hiddens
        self.linear_1 = nn.Linear(num_inputs, num_hiddens)
        self.linear_2 = nn.Linear(num_hiddens, num_outputs)
        self.relu = nn.ReLU()

    def forward(self, X):
        X = X.reshape((-1, self.num_inputs))
        H1 = self.relu(self.linear_1(X))
        out = self.linear_2(H1)
        return out


def get_inputs():
    return [torch.rand([4, 784])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 784), (784, 1))
    assert_size_stride(primals_2, (256, 784), (784, 1))
    assert_size_stride(primals_3, (256,), (1,))
    assert_size_stride(primals_4, (10, 256), (256, 1))
    assert_size_stride(primals_5, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
            256), (1, 784), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
            (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf2)
        del primals_5
    return buf2, primals_1, buf1, primals_4


class NetNew(nn.Module):

    def __init__(self, num_inputs=784, num_outputs=10, num_hiddens=256,
            is_training=True):
        super(NetNew, self).__init__()
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.num_hiddens = num_hiddens
        self.linear_1 = nn.Linear(num_inputs, num_hiddens)
        self.linear_2 = nn.Linear(num_hiddens, num_outputs)
        self.relu = nn.ReLU()

    def forward(self, input_0):
        primals_2 = self.linear_1.weight
        primals_3 = self.linear_1.bias
        primals_4 = self.linear_2.weight
        primals_5 = self.linear_2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
samjz06/d2l-pytorch
Net
false
7,593
[ "Apache-2.0" ]
1
80eca3f7d217eefb4f6ae08aae24c6a3c2714898
https://github.com/samjz06/d2l-pytorch/tree/80eca3f7d217eefb4f6ae08aae24c6a3c2714898
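A short smoke test for this row (assumes CUDA). The compiled path is one cuBLAS mm, the fused bias-plus-ReLU Triton kernel over the 4x256 hidden activations, and a final addmm for the output layer:

import torch

net = NetNew().cuda()                  # defaults: 784 -> 256 -> 10
x = torch.rand(4, 784, device='cuda')  # matches get_inputs()
logits = net(x)
print(logits.shape)                    # torch.Size([4, 10])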
BinaryLogisticRegressionLoss
import torch
import torch.nn as nn


def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
        ratio_range=(1.05, 21), eps=1e-05):
    """Binary Logistic Regression Loss."""
    label = label.view(-1)
    reg_score = reg_score.contiguous().view(-1)
    pmask = (label > threshold).float()
    num_positive = max(torch.sum(pmask), 1)
    num_entries = len(label)
    ratio = num_entries / num_positive
    ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
    coef_0 = 0.5 * ratio / (ratio - 1)
    coef_1 = 0.5 * ratio
    loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 -
        pmask) * torch.log(1.0 - reg_score + eps)
    loss = -torch.mean(loss)
    return loss


class BinaryLogisticRegressionLoss(nn.Module):
    """Binary Logistic Regression Loss.

    It will calculate binary logistic regression loss given reg_score and
    label.
    """

    def forward(self, reg_score, label, threshold=0.5, ratio_range=(1.05,
            21), eps=1e-05):
        """Calculate Binary Logistic Regression Loss.

        Args:
            reg_score (torch.Tensor): Predicted score by model.
            label (torch.Tensor): Groundtruth labels.
            threshold (float): Threshold for positive instances.
                Default: 0.5.
            ratio_range (tuple): Lower bound and upper bound for ratio.
                Default: (1.05, 21)
            eps (float): Epsilon for small value. Default: 1e-5.

        Returns:
            torch.Tensor: Returned binary logistic loss.
        """
        return binary_logistic_regression_loss(reg_score, label, threshold,
            ratio_range, eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_add_clamp_div_gt_log_mean_mul_neg_reciprocal_rsub_sub_sum_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp19 = tl.load(in_ptr1 + r0, None) tmp1 = 0.5 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 1.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.full([1], 1, tl.int32) tmp10 = tmp9 / tmp8 tmp11 = 256.0 tmp12 = tmp10 * tmp11 tmp13 = 1.05 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = 21.0 tmp16 = triton_helpers.minimum(tmp14, tmp15) tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp3 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp18 * tmp22 tmp24 = tmp16 - tmp7 tmp25 = tmp17 / tmp24 tmp26 = tmp7 - tmp3 tmp27 = tmp25 * tmp26 tmp28 = tmp7 - tmp19 tmp29 = tmp28 + tmp20 tmp30 = tl_math.log(tmp29) tmp31 = tmp27 * tmp30 tmp32 = tmp23 + tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = tmp35 / tmp11 tmp37 = -tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 get_raw_stream(0) triton_per_fused__to_copy_add_clamp_div_gt_log_mean_mul_neg_reciprocal_rsub_sub_sum_0[ grid(1)](buf3, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, def binary_logistic_regression_loss(reg_score, label, threshold=0.5, ratio_range=(1.05, 21), eps=1e-05): """Binary Logistic Regression Loss.""" label = label.view(-1) reg_score = reg_score.contiguous().view(-1) pmask = (label > threshold).float() num_positive = max(torch.sum(pmask), 1) num_entries = len(label) ratio = num_entries / num_positive ratio = min(max(ratio, ratio_range[0]), ratio_range[1]) coef_0 = 0.5 * ratio / (ratio - 1) coef_1 = 0.5 * ratio loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 - pmask ) * torch.log(1.0 - reg_score + eps) loss = -torch.mean(loss) return loss class BinaryLogisticRegressionLossNew(nn.Module): """Binary Logistic Regression Loss. It will calculate binary logistic regression loss given reg_score and label. """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
scenarios/dev
BinaryLogisticRegressionLoss
false
4,455
[ "Apache-2.0" ]
0
9f91ebc142cea1c31231d233571ad59460ab6fba
https://github.com/scenarios/dev/tree/9f91ebc142cea1c31231d233571ad59460ab6fba
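One detail worth flagging in this row: the fused reduction kernel bakes num_entries in as the literal 256.0 (and RBLOCK = 256), so the compiled loss only reproduces the eager function for inputs with exactly 256 elements, such as the 4x4x4x4 tensors from get_inputs(). A check under that assumption (CUDA required):

import torch

score = torch.rand(4, 4, 4, 4, device='cuda')
label = torch.rand(4, 4, 4, 4, device='cuda')
ref = binary_logistic_regression_loss(score, label)
out = BinaryLogisticRegressionLossNew()(score, label)
assert torch.allclose(out, ref, atol=1e-5)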
UpConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rc/crcgvn2t5cupsa3uzyzlz4vj5mtkxy2mopv5gmyjpcsh5voeodtr.py # Topologically Sorted Source Nodes: [out, inputs], Original ATen: [aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # inputs => _unsafe_index_1, _unsafe_index_2 # out => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_1, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused__unsafe_index_reflection_pad2d_0 = async_compile.triton('triton_poi_fused__unsafe_index_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 10) % 10 x0 = xindex % 10 x2 = (xindex // 100) x5 = xindex tmp0 = 7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = 7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0))))) tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x5), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cr/ccrfebtywtkn5ct3gnhnmnfdgmakzatlpfhdiithbltpuxxs3veo.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # out_1 => convolution # out_2 => expm1, gt, mul_4, mul_6, where # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_4,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_4, %mul_6), kwargs = {}) triton_poi_fused_convolution_elu_1 = async_compile.triton('triton_poi_fused_convolution_elu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset 
= tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out, inputs], Original ATen: [aten._unsafe_index, aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_reflection_pad2d_0.run(primals_1, buf0, 1600, grid=grid(1600), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_1.run(buf2, primals_3, 1024, grid=grid(1024), stream=stream0) del primals_3 return (buf2, primals_2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 10 % 10 x0 = xindex % 10 x2 = xindex // 100 x5 = xindex tmp0 = 7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = 7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x5, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch. float32) get_raw_stream(0) triton_poi_fused__unsafe_index_reflection_pad2d_0[grid(1600)](primals_1 , buf0, 1600, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_elu_1[grid(1024)](buf2, primals_3, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class Conv3x3(nn.Module): def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, inputs): inputs = self.pad(inputs) out = self.conv(inputs) return out class UpConvNew(nn.Module): def __init__(self, in_channels, out_channels): super(UpConvNew, self).__init__() self.upsamp = nn.UpsamplingNearest2d(scale_factor=2) self.conv = Conv3x3(in_channels=in_channels, out_channels=out_channels) self.elu = nn.ELU(inplace=True) def forward(self, input_0): primals_2 = self.conv.conv.weight primals_3 = self.conv.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
prstrive/EPCDepth
UpConv
false
16,287
[ "MIT" ]
76
84119c806741334b652749ee953e3eab60a3718c
https://github.com/prstrive/EPCDepth/tree/84119c806741334b652749ee953e3eab60a3718c
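A usage sketch for this row (assumes CUDA). The first kernel fuses the 2x nearest-neighbour upsample with the reflection pad into a single indexing pass, the 3x3 convolution runs as an extern kernel, and the second kernel fuses the bias add with ELU in place:

import torch

up = UpConvNew(in_channels=4, out_channels=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = up(x)
print(y.shape)  # torch.Size([4, 4, 8, 8]): 4 -> 8 upsample, pad to 10x10, valid 3x3 conv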
SSIM
import torch
import torch.nn as nn


class SSIM(nn.Module):
    """Layer to compute the SSIM loss between a pair of images
    """

    def __init__(self):
        super(SSIM, self).__init__()
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.refl = nn.ReflectionPad2d(1)
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, x, y):
        x = self.refl(x)
        y = self.refl(y)
        mu_x = self.mu_x_pool(x)
        mu_y = self.mu_y_pool(y)
        sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
        sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
        sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
        SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y +
            self.C2)
        return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask) tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask) tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask) tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask) tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask) tmp19 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp22 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp24 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp26 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp28 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp30 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp32 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp34 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp55 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp58 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * 
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp60 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp62 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp64 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp66 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp68 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp70 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp17 = 0.1111111111111111 tmp18 = tmp16 * tmp17 tmp21 = tmp20 + tmp19 tmp23 = tmp22 + tmp21 tmp25 = tmp24 + tmp23 tmp27 = tmp26 + tmp25 tmp29 = tmp28 + tmp27 tmp31 = tmp30 + tmp29 tmp33 = tmp32 + tmp31 tmp35 = tmp34 + tmp33 tmp36 = tmp35 * tmp17 tmp37 = tmp19 * tmp19 tmp38 = tmp20 * tmp20 tmp39 = tmp38 + tmp37 tmp40 = tmp22 * tmp22 tmp41 = tmp40 + tmp39 tmp42 = tmp24 * tmp24 tmp43 = tmp42 + tmp41 tmp44 = tmp26 * tmp26 tmp45 = tmp44 + tmp43 tmp46 = tmp28 * tmp28 tmp47 = tmp46 + tmp45 tmp48 = tmp30 * tmp30 tmp49 = tmp48 + tmp47 tmp50 = tmp32 * tmp32 tmp51 = tmp50 + tmp49 tmp52 = tmp34 * tmp34 tmp53 = tmp52 + tmp51 tmp54 = tmp53 * tmp17 tmp57 = tmp56 + tmp55 tmp59 = tmp58 + tmp57 tmp61 = tmp60 + tmp59 tmp63 = tmp62 + tmp61 tmp65 = tmp64 + tmp63 tmp67 = tmp66 + tmp65 tmp69 = tmp68 + tmp67 tmp71 = tmp70 + tmp69 tmp72 = tmp71 * tmp17 tmp73 = tmp55 * tmp55 tmp74 = tmp56 * tmp56 tmp75 = tmp74 + tmp73 tmp76 = tmp58 * tmp58 tmp77 = tmp76 + tmp75 tmp78 = tmp60 * tmp60 tmp79 = tmp78 + tmp77 tmp80 = tmp62 * tmp62 tmp81 = tmp80 + tmp79 tmp82 = tmp64 * tmp64 tmp83 = tmp82 + tmp81 tmp84 = tmp66 * tmp66 tmp85 = tmp84 + tmp83 tmp86 = tmp68 * tmp68 tmp87 = tmp86 + tmp85 tmp88 = tmp70 * tmp70 tmp89 = tmp88 + tmp87 tmp90 = tmp89 * tmp17 tmp91 = 2.0 tmp92 = tmp36 * tmp91 tmp93 = tmp92 * tmp72 tmp94 = 0.0001 tmp95 = tmp93 + tmp94 tmp96 = tmp36 * tmp72 tmp97 = tmp18 - tmp96 tmp98 = tmp97 * tmp91 tmp99 = 0.0009 tmp100 = tmp98 + tmp99 tmp101 = tmp95 * tmp100 tmp102 = tmp36 * tmp36 tmp103 = tmp72 * tmp72 tmp104 = tmp102 + tmp103 tmp105 = tmp104 + tmp94 tmp106 = tmp54 - tmp102 tmp107 = tmp90 - tmp103 tmp108 = tmp106 + tmp107 tmp109 = tmp108 + tmp99 tmp110 = tmp105 * tmp109 tmp111 = tmp101 / tmp110 tmp112 = 1.0 tmp113 = tmp112 - tmp111 tmp114 = 0.5 tmp115 = tmp113 * tmp114 tmp116 = 0.0 tmp117 = triton_helpers.maximum(tmp115, tmp116) tmp118 = triton_helpers.minimum(tmp117, tmp112) tl.store(in_out_ptr0 + x3, tmp118, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reflection_pad2d_0[grid(576)](arg0_1, arg1_1, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf0 del buf0 buf7 = buf6 del buf6 triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1[ grid(256)](buf7, buf2, arg0_1, arg1_1, 256, XBLOCK=128, 
num_warps=4, num_stages=1) del arg0_1 del arg1_1 del buf2 return buf7, class SSIMNew(nn.Module): """Layer to compute the SSIM loss between a pair of images """ def __init__(self): super(SSIMNew, self).__init__() self.mu_x_pool = nn.AvgPool2d(3, 1) self.mu_y_pool = nn.AvgPool2d(3, 1) self.sig_x_pool = nn.AvgPool2d(3, 1) self.sig_y_pool = nn.AvgPool2d(3, 1) self.sig_xy_pool = nn.AvgPool2d(3, 1) self.refl = nn.ReflectionPad2d(1) self.C1 = 0.01 ** 2 self.C2 = 0.03 ** 2 def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Morbotu/drone-PWS
SSIM
false
11,724
[ "MIT" ]
0
face9cbf30a55783592cce8af59c1c70da982b6a
https://github.com/Morbotu/drone-PWS/tree/face9cbf30a55783592cce8af59c1c70da982b6a
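A quick property check for this row (assumes CUDA). For identical inputs, SSIM_n equals SSIM_d, so clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1) should be all zeros:

import torch

ssim = SSIMNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
loss = ssim(x, x)
assert torch.allclose(loss, torch.zeros_like(loss), atol=1e-5)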
NeuralNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, 
times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
            primals_5, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
        primals_6, buf5, primals_4, buf6)


class NeuralNetNew(nn.Module):

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNetNew, self).__init__()
        self.l1 = nn.Linear(input_size, hidden_size)
        self.l2 = nn.Linear(hidden_size, hidden_size)
        self.l3 = nn.Linear(hidden_size, num_classes)
        self.relu = nn.ReLU()

    def forward(self, input_0):
        primals_1 = self.l1.weight
        primals_2 = self.l1.bias
        primals_4 = self.l2.weight
        primals_5 = self.l2.bias
        primals_6 = self.l3.weight
        primals_7 = self.l3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
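For readers comparing the two versions: triton_poi_fused_relu_threshold_backward_0 fuses the linear layer's bias add with the ReLU, writing the activation back over the matmul output in place, and in the same pass stores the <= 0 mask (buf5/buf6) that autograd's threshold_backward later reuses. A minimal eager-mode sketch of that fused step (names here are illustrative, not part of the generated code):

import torch

def bias_relu_with_mask(x: torch.Tensor, bias: torch.Tensor):
    # ReLU(x + bias), plus the mask the backward pass needs.
    out = torch.relu(x + bias)  # bias broadcasts over the last dim, like in_ptr0
    mask = out <= 0             # stored to out_ptr0 as a bool buffer
    return out, mask

out, mask = bias_relu_with_mask(torch.randn(4, 4, 4, 4), torch.randn(4))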
AlejandroE25/pytorch-chatbot
NeuralNet
false
8,836
[ "MIT" ]
0
b9d7926f2f897f3a8513e8796b38f928715738af
https://github.com/AlejandroE25/pytorch-chatbot/tree/b9d7926f2f897f3a8513e8796b38f928715738af
VarianceC
import torch
import torch.utils.data
import torch
import torch.nn as nn


class VarianceC(nn.Module):

    def __init__(self):
        super(VarianceC, self).__init__()

    def forward(self, x):
        mean_x = torch.mean(x, dim=1, keepdim=True)
        sub_x = x.sub(mean_x)
        x = torch.mean(torch.mul(sub_x, sub_x), dim=1, keepdim=True)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
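A note on semantics: the forward above computes the population (biased) variance along dim 1, dividing by N rather than N - 1, so it should agree with torch.var with unbiased=False. A small sanity check (a sketch, not taken from the source repo):

import torch

x = torch.rand(4, 4, 4, 4)
manual = ((x - x.mean(dim=1, keepdim=True)) ** 2).mean(dim=1, keepdim=True)
builtin = torch.var(x, dim=1, unbiased=False, keepdim=True)  # biased estimator
assert torch.allclose(manual, builtin, atol=1e-6)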
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mean_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tl.store(out_ptr0 + x2, tmp20, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_mul_sub_0[grid(64)](arg0_1, buf0, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class VarianceCNew(nn.Module):

    def __init__(self):
        super(VarianceCNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
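The three eager ops (mean, sub, mul + mean) collapse into the single pointwise kernel above: each program instance loads the four dim-1 slices of one (batch, h, w) position once and computes the variance directly, with no intermediate tensors. Roughly, per output element (an illustrative sketch, with the dim-1 extent hard-coded to 4 exactly as in the kernel):

import torch

def one_output_element(x, b, h, w):
    vals = [x[b, c, h, w] for c in range(4)]   # the four strided loads
    mean = sum(vals) / 4.0
    return sum((v - mean) ** 2 for v in vals) / 4.0

x = torch.rand(4, 4, 4, 4)
ref = ((x - x.mean(1, keepdim=True)) ** 2).mean(1, keepdim=True)
assert torch.isclose(one_output_element(x, 0, 0, 0), ref[0, 0, 0, 0])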
caixin1998/pl-template
VarianceC
false
10,112
[ "BSD-3-Clause" ]
0
6918f0289ab2b32d107e5722617d25c9a683399c
https://github.com/caixin1998/pl-template/tree/6918f0289ab2b32d107e5722617d25c9a683399c
Conv1dCompression
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/bs/cbstxeghddltznr7shuzsnth6ngv6mnftr2w7pqzzm5flm72plbl.py
# Topologically Sorted Source Nodes: [c_mem], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   c_mem => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [4], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 4], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [c_mem], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   c_mem => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [4], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [c_mem], Original ATen: [aten.convolution]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
        # Topologically Sorted Source Nodes: [c_mem], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(4,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
        del buf0
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [c_mem], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0)
        del primals_3
    return (reinterpret_tensor(buf2, (1, 4, 4), (1, 4, 1), 0), primals_2,
        reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(4,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
        del buf0
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return (reinterpret_tensor(buf2, (1, 4, 4), (1, 4, 1), 0), primals_2,
        reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0))


class Conv1dCompressionNew(Module):
    """
    ## 1D Convolution Compression $f_c$

    This is a simple wrapper around
    [`nn.Conv1d`](https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html)
    with some tensor dimension permutations.
    """

    def __init__(self, compression_rate: 'int', d_model: 'int'):
        """
        * `compression_rate` $c$
        * `d_model` is the embedding size
        """
        super().__init__()
        self.conv = nn.Conv1d(d_model, d_model, kernel_size=
            compression_rate, stride=compression_rate)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_3 = self.conv.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
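Read back in eager terms, the compiled graph permutes so the Conv1d runs over the sequence axis, applies the strided convolution (kernel_size == stride == compression_rate, so the sequence shrinks by a factor of c), adds the bias in the small fused kernel, and permutes back. A minimal eager sketch under that reading (the permute order is inferred from the generated strides, not taken verbatim from the source repo):

import torch
from torch import nn

class Conv1dCompressionEager(nn.Module):
    def __init__(self, compression_rate: int, d_model: int):
        super().__init__()
        self.conv = nn.Conv1d(d_model, d_model, kernel_size=compression_rate,
                              stride=compression_rate)

    def forward(self, mem: torch.Tensor) -> torch.Tensor:
        x = mem.permute(1, 2, 0)   # [seq, batch, d] -> [batch, d, seq]
        x = self.conv(x)           # stride == kernel_size: seq -> seq // c
        return x.permute(2, 0, 1)  # back to [seq // c, batch, d]

out = Conv1dCompressionEager(4, 4)(torch.randn(4, 4, 4))  # -> [1, 4, 4], matching buf2 above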
Aarsh2001/annotated_deep_learning_paper_implementations
Conv1dCompression
false
4,786
[ "MIT" ]
1
ff0d5c065da1a46769f5f66fddc252c178f8fa37
https://github.com/Aarsh2001/annotated_deep_learning_paper_implementations/tree/ff0d5c065da1a46769f5f66fddc252c178f8fa37
NormalSampler
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/gi/cgif32a2wfs45bh7axmyrqfirotrk647j5jhcoati6xtmmnm2id6.py
# Topologically Sorted Source Nodes: [mul, std, z], Original ATen: [aten.mul, aten.exp, aten.addcmul]
# Source node to ATen node mapping:
#   mul => mul
#   std => exp
#   z => add, mul_1, mul_2
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.5), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, 1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %randn), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul_2), kwargs = {})
triton_poi_fused_addcmul_exp_mul_0 = async_compile.triton('triton_poi_fused_addcmul_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_addcmul_exp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_addcmul_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask)
    tmp7 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tl_math.exp(tmp3)
    tmp5 = 1.0
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 * tmp7
    tmp9 = tmp0 + tmp8
    tl.store(in_out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [epsilon], Original ATen: [aten.randn]
        buf0 = torch.ops.aten.randn.default([4, 4, 4, 4], device=device(type='cuda', index=0), pin_memory=False)
        buf1 = buf0
        del buf0
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [mul, std, z], Original ATen: [aten.mul, aten.exp, aten.addcmul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_addcmul_exp_mul_0.run(buf2, arg0_1, arg1_1, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
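The fused addcmul/exp/mul kernel above is the reparameterization trick written out elementwise: epsilon is drawn once with aten.randn (buf0) and then overwritten in place with z = mean + exp(0.5 * log_var) * epsilon. The eager equivalent (a sketch; argument names are illustrative):

import torch

def sample_z(mean: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
    std = torch.exp(0.5 * log_var)  # the mul + exp steps in the kernel
    eps = torch.randn_like(mean)    # the aten.randn buffer
    return mean + std * eps         # the addcmul, stored over eps in place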
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_addcmul_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp7 = tl.load(in_out_ptr0 + x0, xmask)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tl_math.exp(tmp3)
    tmp5 = 1.0
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 * tmp7
    tmp9 = tmp0 + tmp8
    tl.store(in_out_ptr0 + x0, tmp9, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten.randn.default([4, 4, 4, 4], device=device(
            type='cuda', index=0), pin_memory=False)
        buf1 = buf0
        del buf0
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_addcmul_exp_mul_0[grid(256)](buf2, arg0_1, arg1_1,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class NormalSamplerNew(nn.Module):
    """p(z)"""

    def __init__(self):
        super(NormalSamplerNew, self).__init__()
        self.register_buffer('eps', torch.tensor(1e-10))

    def kl_divergence(self, mean, log_var, z):
        """
        L elbo(x) = Eq(z|x)[log p(x|z)] - KL(q(z|x)||p(z))
        D_{KL}(q(z|x)||p(z))
        """
        return -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp(),
            dim=1)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
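The kl_divergence method carries the closed form KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2). A quick sanity check against torch.distributions (a sketch, not part of the repo):

import torch
from torch.distributions import Normal, kl_divergence

mean, log_var = torch.randn(4, 4), torch.randn(4, 4)
closed_form = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp(), dim=1)
reference = kl_divergence(Normal(mean, torch.exp(0.5 * log_var)),
                          Normal(0.0, 1.0)).sum(dim=1)
assert torch.allclose(closed_form, reference, atol=1e-5)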
ChengF-Lab/scGGN
NormalSampler
false
2,099
[ "MIT" ]
0
eab585219e6d3eb06c94057f0e3b276d1846e8b6
https://github.com/ChengF-Lab/scGGN/tree/eab585219e6d3eb06c94057f0e3b276d1846e8b6
Discriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/iu/ciuohqr5pqwxxdqpgaszyr2gmkyh4webkx47a5whlcxapcaqrmpd.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 96 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 6 y1 = (yindex // 6) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (6*x2) + (150*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_9/inductor_cache/mo/cmo7kprbhn5foykjllxlvsdu5fash7avpwgsvrlgsnxi6osh7vsj.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 24 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 6 y1 = (yindex // 6) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (6*x2) + (24576*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/zd/czdmxdjo6vbpeyu4gsmsvm5mqdlzdrlububsx2rsj2j5l2h4dsrz.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 512 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = (yindex // 16) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (16*x2) + (400*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/5h/c5hkuqsyg6b4mbsdxblt4xagz75onxk3mtokac7gyjwh7ljruai5.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = (yindex // 32) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (32*x2) + (800*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gi/cgimqxxs6qckr3romidticzvnpearx2sdhoesal5xonmke5jjfzs.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7p/c7pmnbahcm3yi2qyuxtlqhxjvvjyzmylknsgacn2efuf4vcdsb3o.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) triton_per_fused__weight_norm_interface_5 = async_compile.triton('triton_per_fused__weight_norm_interface_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 
1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_5(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 150 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (150*x0)), rmask & xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/ah/cahfaxtna75ni4ziltb65ap3fcjvdtspp55d36wuo54p7phmd4d7.py # Topologically Sorted Source Nodes: [_weight_norm, x], Original ATen: [aten._weight_norm_interface, aten.convolution] # Source node to ATen node mapping: # _weight_norm => div, mul # x => convolution # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused__weight_norm_interface_convolution_6 = async_compile.triton('triton_poi_fused__weight_norm_interface_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 32], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__weight_norm_interface_convolution_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 96 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 6 y1 = (yindex // 6) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (6*x2) + (150*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y1), ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y1), ymask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2 + (25*y3)), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + (6*x2) + (150*y1)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/j4/cj4nml7l3vz2a35zvcdcxuvsdtqjohvfzpywagxxromilqxzhbpz.py # Topologically Sorted Source Nodes: [x, sub, relu, x_1], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # sub => sub # x => convolution # x_1 => add # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %primals_5), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_5), kwargs = {}) # %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_sub_threshold_backward_7 = async_compile.triton('triton_poi_fused_add_convolution_relu_sub_threshold_backward_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sub_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_sub_threshold_backward_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 + tmp4 tmp9 = 0.0 tmp10 = tmp7 <= tmp9 tl.store(out_ptr0 + (x2), tmp8, None) tl.store(out_ptr1 + (x2), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/le/cle6rmavp35boyijii2h4xuy6bdwo7zsyw5rt263pugye2575zk7.py # Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm_1 => pow_3, pow_4, sum_2 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_7, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1, 2, 3], True), kwargs = {}) # %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) triton_per_fused__weight_norm_interface_8 = async_compile.triton('triton_per_fused__weight_norm_interface_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_8(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 32 XBLOCK: tl.constexpr = 1 rnumel = 400 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 
+ (400*x0)), rmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp6 = libdevice.sqrt(tmp5) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/jy/cjyd2bfnjahrfv64m5cnh3lmmfssee6lcgjle7w3my7ijmbkorga.py # Topologically Sorted Source Nodes: [_weight_norm_1, x_2], Original ATen: [aten._weight_norm_interface, aten.convolution] # Source node to ATen node mapping: # _weight_norm_1 => div_1, mul_1 # x_2 => convolution_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_6, %pow_4), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %div_1), kwargs = {}) # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %mul_1, %primals_8, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused__weight_norm_interface_convolution_9 = async_compile.triton('triton_poi_fused__weight_norm_interface_convolution_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 32], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_convolution_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__weight_norm_interface_convolution_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 512 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (400*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y1), ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y1), ymask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2 + (25*y3)), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + (16*x2) + (400*y1)), tmp4, xmask & 
ymask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/hf/chf2oujwjqo77d2oewwcjflyfxfwpqvtvewb45opuxhypalaj4hi.py # Topologically Sorted Source Nodes: [x_2, sub_1, relu_1, x_3], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # relu_1 => relu_1 # sub_1 => sub_1 # x_2 => convolution_1 # x_3 => add_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %mul_1, %primals_8, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_1, %primals_9), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_1, %primals_9), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_sub_threshold_backward_10 = async_compile.triton('triton_poi_fused_add_convolution_relu_sub_threshold_backward_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sub_threshold_backward_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_sub_threshold_backward_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 + tmp4 tmp9 = 0.0 tmp10 = tmp7 <= tmp9 tl.store(out_ptr0 + (x2), tmp8, None) tl.store(out_ptr1 + (x2), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pi/cpiwvvp45ekwuhrxsyobb3n5ljdhjaagu4euvuzvumipx34ehfq4.py # 
Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm_2 => pow_5, pow_6, sum_3 # Graph fragment: # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_11, 2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1, 2, 3], True), kwargs = {}) # %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {}) triton_per_fused__weight_norm_interface_11 = async_compile.triton('triton_per_fused__weight_norm_interface_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_11(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 64 XBLOCK: tl.constexpr = 1 rnumel = 800 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (800*x0)), rmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp6 = libdevice.sqrt(tmp5) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gs/cgsknv5coj4ejltnr3jcylmxgk6jlqfs4afhc7njvwvkix2alhpl.py # Topologically Sorted Source Nodes: [_weight_norm_2, x_4], Original ATen: [aten._weight_norm_interface, aten.convolution] # Source node to ATen node mapping: # _weight_norm_2 => div_2, mul_2 # x_4 => convolution_2 # Graph fragment: # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_10, %pow_6), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {}) # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_1, %mul_2, %primals_12, [2, 2], [2, 2], [1, 1], 
False, [0, 0], 1), kwargs = {}) triton_poi_fused__weight_norm_interface_convolution_12 = async_compile.triton('triton_poi_fused__weight_norm_interface_convolution_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 32], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_convolution_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__weight_norm_interface_convolution_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 32 y1 = (yindex // 32) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (32*x2) + (800*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y1), None, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2 + (25*y3)), tmp4, xmask) tl.store(out_ptr1 + (y0 + (32*x2) + (800*y1)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/6z/c6zwf7iqh7p7uhqktvfopwmrrh7valfg7nvxllhwstvwc5uzjf6w.py # Topologically Sorted Source Nodes: [x_4, sub_2, relu_2, x_5], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # relu_2 => relu_2 # sub_2 => sub_2 # x_4 => convolution_2 # x_5 => add_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_1, %mul_2, %primals_12, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_2, %primals_13), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_2,), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_2, %primals_13), kwargs = {}) # %le_1 : [num_users=1] = 
call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_sub_threshold_backward_13 = async_compile.triton('triton_poi_fused_add_convolution_relu_sub_threshold_backward_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sub_threshold_backward_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_sub_threshold_backward_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 + tmp4 tmp9 = 0.0 tmp10 = tmp7 <= tmp9 tl.store(out_ptr0 + (x2), tmp8, None) tl.store(out_ptr1 + (x2), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/3k/c3kgdgezw74wchseqbghu26t2cbtgdk7bhf7d5o6twleljcuh3si.py # Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm_3 => pow_7, pow_8, sum_4 # Graph fragment: # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_15, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [1, 2, 3], True), kwargs = {}) # %pow_8 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) triton_red_fused__weight_norm_interface_14 = async_compile.triton('triton_red_fused__weight_norm_interface_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__weight_norm_interface_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__weight_norm_interface_14(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (1600*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask & xmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = libdevice.sqrt(tmp3) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/32/c32t4jsszdt33uczvurnkh5wbiplpdpqacvw7mhtf6n5kdxl54zb.py # Topologically Sorted Source Nodes: [_weight_norm_3, x_6], Original ATen: [aten._weight_norm_interface, aten.convolution] # Source node to ATen node mapping: # _weight_norm_3 => div_3, mul_3 # x_6 => convolution_3 # Graph fragment: # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_14, %pow_8), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_15, %div_3), kwargs = {}) # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_2, %mul_3, %primals_16, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused__weight_norm_interface_convolution_15 = async_compile.triton('triton_poi_fused__weight_norm_interface_convolution_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 32], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_convolution_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__weight_norm_interface_convolution_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 64 y1 = (yindex // 64) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (64*x2) + (1600*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y1), None, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2 + (25*y3)), tmp4, xmask) tl.store(out_ptr1 + (y0 + (64*x2) + (1600*y1)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/7r/c7rdxixjosbh7ymn3x5j2sl25x6yqhleclggjp63z2jy7jyjuqvn.py # Topologically Sorted Source Nodes: [x_6, sub_3, relu_3, x_7], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # relu_3 => relu_3 # sub_3 => sub_3 # x_6 => convolution_3 # x_7 => add_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_2, %mul_3, %primals_16, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_3, %primals_17), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub_3,), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_3, %primals_17), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_sub_threshold_backward_16 = async_compile.triton('triton_poi_fused_add_convolution_relu_sub_threshold_backward_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sub_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_sub_threshold_backward_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 + tmp4 tmp9 = 0.0 tmp10 = tmp7 <= tmp9 tl.store(out_ptr0 + (x2), tmp8, None) tl.store(out_ptr1 + (x2), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/h2/ch2muopo5hd67wmawis4b2yeteunbgvli4oplje4f6sgnddstzxr.py # Topologically Sorted Source Nodes: [_weight_norm_4], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm_4 => div_4, mul_4, pow_10, pow_9, sum_5 # Graph fragment: # %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_19, 2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_9, [1, 2, 3], True), kwargs = {}) # %pow_10 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 0.5), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_18, %pow_10), kwargs = {}) # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_19, %div_4), kwargs = {}) triton_per_fused__weight_norm_interface_17 = async_compile.triton('triton_per_fused__weight_norm_interface_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_per_fused__weight_norm_interface_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_17(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/xg/cxgirr7ywaqvjxpseg4bhe4k4fyif3g5cipvyiq4q2imyyntrosj.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_8 => convolution_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_3, %mul_4, %primals_20, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_18 = async_compile.triton('triton_poi_fused_convolution_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args args.clear() assert_size_stride(primals_1, (16, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (4, 6, 64, 64), (24576, 4096, 64, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (32, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_7, (32, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_8, (32, ), (1, )) assert_size_stride(primals_9, (1, ), (1, )) assert_size_stride(primals_10, (64, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_11, (64, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_12, (64, ), (1, )) assert_size_stride(primals_13, (1, ), (1, )) assert_size_stride(primals_14, (128, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_15, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_16, (128, ), (1, )) assert_size_stride(primals_17, (1, ), (1, )) assert_size_stride(primals_18, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_19, (1, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_20, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_2, buf0, 96, 25, grid=grid(96, 25), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, 6, 64, 64), (24576, 1, 384, 6), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_4, buf1, 24, 4096, grid=grid(24, 4096), stream=stream0) del primals_4 buf2 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_7, buf2, 512, 25, grid=grid(512, 25), stream=stream0) del primals_7 buf3 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_11, buf3, 2048, 25, grid=grid(2048, 25), stream=stream0) del primals_11 buf4 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_15, buf4, 8192, 25, grid=grid(8192, 25), stream=stream0) del primals_15 buf5 = empty_strided_cuda((16, 1, 1, 1), (1, 16, 16, 16), torch.float32) buf6 = reinterpret_tensor(buf5, (16, 1, 1, 1), (1, 1, 1, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] triton_per_fused__weight_norm_interface_5.run(buf6, buf0, 16, 150, grid=grid(16), stream=stream0) buf7 = empty_strided_cuda((16, 6, 5, 5), (150, 25, 5, 1), torch.float32) buf8 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm, x], Original ATen: [aten._weight_norm_interface, aten.convolution] 
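    # buf7 holds the normalized weight g * v / ||v|| in the original contiguous
    # layout (kept for the return tuple / backward pass), while buf8 carries the
    # same values in the channels-last layout that the convolution below consumes.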
triton_poi_fused__weight_norm_interface_convolution_6.run(buf0, primals_1, buf6, buf7, buf8, 96, 25, grid=grid(96, 25), stream=stream0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf1, buf8, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 16, 32, 32), (16384, 1, 512, 16)) del buf8 buf10 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16), torch.float32) buf37 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16), torch.bool) # Topologically Sorted Source Nodes: [x, sub, relu, x_1], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_convolution_relu_sub_threshold_backward_7.run(buf9, primals_3, primals_5, buf10, buf37, 65536, grid=grid(65536), stream=stream0) del buf9 del primals_3 del primals_5 buf11 = empty_strided_cuda((32, 1, 1, 1), (1, 32, 32, 32), torch.float32) buf12 = reinterpret_tensor(buf11, (32, 1, 1, 1), (1, 1, 1, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface] triton_per_fused__weight_norm_interface_8.run(buf12, buf2, 32, 400, grid=grid(32), stream=stream0) buf13 = empty_strided_cuda((32, 16, 5, 5), (400, 25, 5, 1), torch.float32) buf14 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm_1, x_2], Original ATen: [aten._weight_norm_interface, aten.convolution] triton_poi_fused__weight_norm_interface_convolution_9.run(buf2, primals_6, buf12, buf13, buf14, 512, 25, grid=grid(512, 25), stream=stream0) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf10, buf14, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 32, 16, 16), (8192, 1, 512, 32)) del buf14 buf16 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32), torch.float32) buf36 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32), torch.bool) # Topologically Sorted Source Nodes: [x_2, sub_1, relu_1, x_3], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_convolution_relu_sub_threshold_backward_10.run(buf15, primals_8, primals_9, buf16, buf36, 32768, grid=grid(32768), stream=stream0) del buf15 del primals_8 del primals_9 buf17 = empty_strided_cuda((64, 1, 1, 1), (1, 64, 64, 64), torch.float32) buf18 = reinterpret_tensor(buf17, (64, 1, 1, 1), (1, 1, 1, 1), 0); del buf17 # reuse # Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface] triton_per_fused__weight_norm_interface_11.run(buf18, buf3, 64, 800, grid=grid(64), stream=stream0) buf19 = empty_strided_cuda((64, 32, 5, 5), (800, 25, 5, 1), torch.float32) buf20 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm_2, x_4], Original ATen: [aten._weight_norm_interface, aten.convolution] triton_poi_fused__weight_norm_interface_convolution_12.run(buf3, primals_10, buf18, buf19, buf20, 2048, 25, grid=grid(2048, 25), stream=stream0) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf16, buf20, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
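    # buf21 is the raw strided-conv output (bias not yet applied); the fused
    # kernel below adds the bias, evaluates the TReLU relu(x - alpha) + alpha,
    # and records the <= 0 mask (buf35) for the backward pass.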
assert_size_stride(buf21, (4, 64, 8, 8), (4096, 1, 512, 64)) del buf20 buf22 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.float32) buf35 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.bool) # Topologically Sorted Source Nodes: [x_4, sub_2, relu_2, x_5], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_convolution_relu_sub_threshold_backward_13.run(buf21, primals_12, primals_13, buf22, buf35, 16384, grid=grid(16384), stream=stream0) del buf21 del primals_12 del primals_13 buf23 = empty_strided_cuda((128, 1, 1, 1), (1, 128, 128, 128), torch.float32) buf24 = reinterpret_tensor(buf23, (128, 1, 1, 1), (1, 1, 1, 1), 0); del buf23 # reuse # Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface] triton_red_fused__weight_norm_interface_14.run(buf24, buf4, 128, 1600, grid=grid(128), stream=stream0) buf25 = empty_strided_cuda((128, 64, 5, 5), (1600, 25, 5, 1), torch.float32) buf26 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm_3, x_6], Original ATen: [aten._weight_norm_interface, aten.convolution] triton_poi_fused__weight_norm_interface_convolution_15.run(buf4, primals_14, buf24, buf25, buf26, 8192, 25, grid=grid(8192, 25), stream=stream0) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf22, buf26, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 128, 4, 4), (2048, 1, 512, 128)) del buf26 buf28 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.float32) buf34 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.bool) # Topologically Sorted Source Nodes: [x_6, sub_3, relu_3, x_7], Original ATen: [aten.convolution, aten.sub, aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_convolution_relu_sub_threshold_backward_16.run(buf27, primals_16, primals_17, buf28, buf34, 8192, grid=grid(8192), stream=stream0) del buf27 del primals_16 del primals_17 buf29 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf30 = buf29; del buf29 # reuse buf31 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm_4], Original ATen: [aten._weight_norm_interface] triton_per_fused__weight_norm_interface_17.run(buf30, primals_19, primals_18, buf31, 1, 128, grid=grid(1), stream=stream0) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf28, buf31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 1, 4, 4), (16, 1, 4, 1)) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] triton_poi_fused_convolution_18.run(buf33, primals_20, 64, grid=grid(64), stream=stream0) del primals_20 return (reinterpret_tensor(buf33, (1, 64), (64, 1), 0), buf7, buf13, buf19, buf25, buf31, primals_1, buf0, buf1, primals_6, buf2, primals_10, buf3, primals_14, buf4, primals_18, primals_19, buf6, buf7, buf10, buf12, buf13, buf16, buf18, buf19, buf22, buf24, buf25, buf28, buf30, buf31, buf34, buf35, buf36, buf37, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance 
primals_1 = rand_strided((16, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 6, 64, 64), (24576, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((128, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((1, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
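# --- Hedged reference sketch (appended for clarity; not Inductor output) ----
# The _weight_norm_interface kernels above compute the weight-norm
# reparameterization w = g * v / ||v||_2: pow -> sum over dims [1, 2, 3] ->
# pow(0.5) for the norm, then div (g / norm) and mul (v * ...). The eager
# equivalent below is illustrative only; the shapes mirror primals_10 /
# primals_11, and torch._weight_norm is the ATen entry point the graph
# fragments reference.
def _weight_norm_reference(v, g):
    norm = v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt()  # pow_5 / sum_3 / pow_6
    return v * (g / norm)                                    # div_2 / mul_2

if __name__ == "__main__":
    _v = torch.randn(64, 32, 5, 5)   # primals_11's shape
    _g = torch.randn(64, 1, 1, 1)    # primals_10's shape
    assert torch.allclose(_weight_norm_reference(_v, _g), torch._weight_norm(_v, _g, 0))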
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F import torch.nn.utils.weight_norm as weightNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 6 y1 = yindex // 6 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 6 * x2 + 150 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 24 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 6 y1 = yindex // 6 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 6 * x2 + 24576 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 512 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 16 * x2 + 400 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_per_fused__weight_norm_interface_5(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 150 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 150 * x0), rmask & xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_convolution_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 6 y1 = yindex // 6 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 6 * x2 + 150 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2 + 25 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 6 * x2 + 150 * y1), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_add_convolution_relu_sub_threshold_backward_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 + tmp4 tmp9 = 0.0 tmp10 = tmp7 <= tmp9 tl.store(out_ptr0 + x2, tmp8, None) tl.store(out_ptr1 + x2, tmp10, None) @triton.jit def triton_per_fused__weight_norm_interface_8(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 400 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 400 * x0), rmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp6 = libdevice.sqrt(tmp5) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, None) @triton.jit def triton_poi_fused__weight_norm_interface_convolution_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 512 xnumel = 
25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 400 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2 + 25 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 16 * x2 + 400 * y1), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_add_convolution_relu_sub_threshold_backward_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 + tmp4 tmp9 = 0.0 tmp10 = tmp7 <= tmp9 tl.store(out_ptr0 + x2, tmp8, None) tl.store(out_ptr1 + x2, tmp10, None) @triton.jit def triton_per_fused__weight_norm_interface_11(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 800 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 800 * x0), rmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp6 = libdevice.sqrt(tmp5) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, None) @triton.jit def triton_poi_fused__weight_norm_interface_convolution_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 32 y1 = yindex // 32 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 32 * x2 + 800 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + y1, None, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2 + 25 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 32 * x2 + 800 * y1), tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_sub_threshold_backward_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 + tmp4 tmp9 = 0.0 tmp10 = tmp7 <= tmp9 tl.store(out_ptr0 + x2, tmp8, None) 
tl.store(out_ptr1 + x2, tmp10, None) @triton.jit def triton_red_fused__weight_norm_interface_14(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 1600 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask & xmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = libdevice.sqrt(tmp3) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_convolution_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 64 y1 = yindex // 64 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 1600 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + y1, None, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2 + 25 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 64 * x2 + 1600 * y1), tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_sub_threshold_backward_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 + tmp4 tmp9 = 0.0 tmp10 = tmp7 <= tmp9 tl.store(out_ptr0 + x2, tmp8, None) tl.store(out_ptr1 + x2, tmp10, None) @triton.jit def triton_per_fused__weight_norm_interface_17(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) @triton.jit def triton_poi_fused_convolution_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = 
tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20) = args args.clear() assert_size_stride(primals_1, (16, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (4, 6, 64, 64), (24576, 4096, 64, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (32, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_7, (32, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_8, (32,), (1,)) assert_size_stride(primals_9, (1,), (1,)) assert_size_stride(primals_10, (64, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_11, (64, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_12, (64,), (1,)) assert_size_stride(primals_13, (1,), (1,)) assert_size_stride(primals_14, (128, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_15, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_16, (128,), (1,)) assert_size_stride(primals_17, (1,), (1,)) assert_size_stride(primals_18, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_19, (1, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_20, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32 ) get_raw_stream(0) triton_poi_fused_0[grid(96, 25)](primals_2, buf0, 96, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 6, 64, 64), (24576, 1, 384, 6), torch .float32) triton_poi_fused_1[grid(24, 4096)](primals_4, buf1, 24, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16), torch. float32) triton_poi_fused_2[grid(512, 25)](primals_7, buf2, 512, 25, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_7 buf3 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch. 
float32) triton_poi_fused_3[grid(2048, 25)](primals_11, buf3, 2048, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_11 buf4 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_4[grid(8192, 25)](primals_15, buf4, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_15 buf5 = empty_strided_cuda((16, 1, 1, 1), (1, 16, 16, 16), torch.float32 ) buf6 = reinterpret_tensor(buf5, (16, 1, 1, 1), (1, 1, 1, 1), 0) del buf5 triton_per_fused__weight_norm_interface_5[grid(16)](buf6, buf0, 16, 150, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((16, 6, 5, 5), (150, 25, 5, 1), torch.float32 ) buf8 = empty_strided_cuda((16, 6, 5, 5), (150, 1, 30, 6), torch.float32 ) triton_poi_fused__weight_norm_interface_convolution_6[grid(96, 25)]( buf0, primals_1, buf6, buf7, buf8, 96, 25, XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf1, buf8, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 16, 32, 32), (16384, 1, 512, 16)) del buf8 buf10 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16), torch.float32) buf37 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16), torch.bool) triton_poi_fused_add_convolution_relu_sub_threshold_backward_7[grid (65536)](buf9, primals_3, primals_5, buf10, buf37, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf9 del primals_3 del primals_5 buf11 = empty_strided_cuda((32, 1, 1, 1), (1, 32, 32, 32), torch. float32) buf12 = reinterpret_tensor(buf11, (32, 1, 1, 1), (1, 1, 1, 1), 0) del buf11 triton_per_fused__weight_norm_interface_8[grid(32)](buf12, buf2, 32, 400, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((32, 16, 5, 5), (400, 25, 5, 1), torch. float32) buf14 = empty_strided_cuda((32, 16, 5, 5), (400, 1, 80, 16), torch. float32) triton_poi_fused__weight_norm_interface_convolution_9[grid(512, 25)]( buf2, primals_6, buf12, buf13, buf14, 512, 25, XBLOCK=1, YBLOCK =256, num_warps=4, num_stages=1) buf15 = extern_kernels.convolution(buf10, buf14, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 32, 16, 16), (8192, 1, 512, 32)) del buf14 buf16 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32), torch.float32) buf36 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32), torch.bool) triton_poi_fused_add_convolution_relu_sub_threshold_backward_10[grid (32768)](buf15, primals_8, primals_9, buf16, buf36, 32768, XBLOCK=256, num_warps=4, num_stages=1) del buf15 del primals_8 del primals_9 buf17 = empty_strided_cuda((64, 1, 1, 1), (1, 64, 64, 64), torch. float32) buf18 = reinterpret_tensor(buf17, (64, 1, 1, 1), (1, 1, 1, 1), 0) del buf17 triton_per_fused__weight_norm_interface_11[grid(64)](buf18, buf3, 64, 800, num_warps=8, num_stages=1) buf19 = empty_strided_cuda((64, 32, 5, 5), (800, 25, 5, 1), torch. 
float32) buf20 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch .float32) triton_poi_fused__weight_norm_interface_convolution_12[grid(2048, 25)]( buf3, primals_10, buf18, buf19, buf20, 2048, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) buf21 = extern_kernels.convolution(buf16, buf20, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 64, 8, 8), (4096, 1, 512, 64)) del buf20 buf22 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch .float32) buf35 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch .bool) triton_poi_fused_add_convolution_relu_sub_threshold_backward_13[grid (16384)](buf21, primals_12, primals_13, buf22, buf35, 16384, XBLOCK=128, num_warps=4, num_stages=1) del buf21 del primals_12 del primals_13 buf23 = empty_strided_cuda((128, 1, 1, 1), (1, 128, 128, 128), torch.float32) buf24 = reinterpret_tensor(buf23, (128, 1, 1, 1), (1, 1, 1, 1), 0) del buf23 triton_red_fused__weight_norm_interface_14[grid(128)](buf24, buf4, 128, 1600, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf25 = empty_strided_cuda((128, 64, 5, 5), (1600, 25, 5, 1), torch .float32) buf26 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused__weight_norm_interface_convolution_15[grid(8192, 25)]( buf4, primals_14, buf24, buf25, buf26, 8192, 25, XBLOCK=32, YBLOCK=128, num_warps=8, num_stages=1) buf27 = extern_kernels.convolution(buf22, buf26, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 128, 4, 4), (2048, 1, 512, 128)) del buf26 buf28 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.float32) buf34 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.bool) triton_poi_fused_add_convolution_relu_sub_threshold_backward_16[grid (8192)](buf27, primals_16, primals_17, buf28, buf34, 8192, XBLOCK=256, num_warps=4, num_stages=1) del buf27 del primals_16 del primals_17 buf29 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf30 = buf29 del buf29 buf31 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch. 
float32) triton_per_fused__weight_norm_interface_17[grid(1)](buf30, primals_19, primals_18, buf31, 1, 128, XBLOCK=1, num_warps=2, num_stages=1) buf32 = extern_kernels.convolution(buf28, buf31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 1, 4, 4), (16, 1, 4, 1)) buf33 = buf32 del buf32 triton_poi_fused_convolution_18[grid(64)](buf33, primals_20, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_20 return (reinterpret_tensor(buf33, (1, 64), (64, 1), 0), buf7, buf13, buf19, buf25, buf31, primals_1, buf0, buf1, primals_6, buf2, primals_10, buf3, primals_14, buf4, primals_18, primals_19, buf6, buf7, buf10, buf12, buf13, buf16, buf18, buf19, buf22, buf24, buf25, buf28, buf30, buf31, buf34, buf35, buf36, buf37) class TReLU(nn.Module): def __init__(self): super(TReLU, self).__init__() self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True) self.alpha.data.fill_(0) def forward(self, x): x = F.relu(x - self.alpha) + self.alpha return x class DiscriminatorNew(nn.Module): def __init__(self): super(DiscriminatorNew, self).__init__() self.conv0 = weightNorm(nn.Conv2d(6, 16, 5, 2, 2)) self.conv1 = weightNorm(nn.Conv2d(16, 32, 5, 2, 2)) self.conv2 = weightNorm(nn.Conv2d(32, 64, 5, 2, 2)) self.conv3 = weightNorm(nn.Conv2d(64, 128, 5, 2, 2)) self.conv4 = weightNorm(nn.Conv2d(128, 1, 1, 1, 0)) self.relu0 = TReLU() self.relu1 = TReLU() self.relu2 = TReLU() self.relu3 = TReLU() def forward(self, input_0): primals_3 = self.conv0.bias primals_1 = self.conv0.weight_g primals_2 = self.conv0.weight_v primals_8 = self.conv1.bias primals_6 = self.conv1.weight_g primals_7 = self.conv1.weight_v primals_12 = self.conv2.bias primals_10 = self.conv2.weight_g primals_11 = self.conv2.weight_v primals_16 = self.conv3.bias primals_14 = self.conv3.weight_g primals_15 = self.conv3.weight_v primals_5 = self.conv4.bias primals_18 = self.conv4.weight_g primals_19 = self.conv4.weight_v primals_9 = self.relu0.alpha primals_13 = self.relu1.alpha primals_17 = self.relu2.alpha primals_20 = self.relu3.alpha primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return output[0]
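# --- Hedged usage sketch (appended; assumes a CUDA device) ------------------
# call() asserts the exact input shape used below; the (1, 64) output is the
# flattened map of per-location logits from the final 1x1 convolution, and
# each TReLU computes relu(x - alpha) + alpha with a learned scalar alpha.
if __name__ == "__main__":
    disc = DiscriminatorNew().cuda()
    x = torch.rand(4, 6, 64, 64, device="cuda")   # matches the primals_4 guard
    scores = disc(x)
    assert scores.shape == (1, 64)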
archiroid003/ICCV2019-LearningToPaint
Discriminator
false
12,134
[ "MIT" ]
0
4b5fc263e4843c159a61e5956956b3f7812693f8
https://github.com/archiroid003/ICCV2019-LearningToPaint/tree/4b5fc263e4843c159a61e5956956b3f7812693f8
LanguageModelCriterion
import torch import torch.nn as nn from torch.autograd import * class LanguageModelCriterion(nn.Module): def __init__(self): super(LanguageModelCriterion, self).__init__() def forward(self, input, target, mask): target = target[:, :input.size(1)] mask = mask[:, :input.size(1)] output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask output = torch.sum(output) / torch.sum(mask) return output def get_inputs(): return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4], dtype=torch.int64), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
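A brief usage sketch for the criterion above, assuming `input` carries per-token log-probabilities of shape (batch, seq, vocab); the shapes and the padded second row are illustrative:

import torch

crit = LanguageModelCriterion()
logprobs = torch.log_softmax(torch.randn(2, 5, 7), dim=-1)  # (batch, seq, vocab) log-probabilities
targets = torch.randint(0, 7, (2, 5))                       # gold token ids
mask = torch.tensor([[1.0, 1.0, 1.0, 0.0, 0.0],             # 1 = real token, 0 = padding
                     [1.0, 1.0, 1.0, 1.0, 1.0]])
loss = crit(logprobs, targets, mask)                        # mean NLL over unmasked tokens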
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr2 + r0, None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp13 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class LanguageModelCriterionNew(nn.Module): def __init__(self): super(LanguageModelCriterionNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
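A hedged sanity check (CUDA only) comparing the fused kernel against the eager module from the previous listing; the shapes below are pinned to exactly what the `call` wrapper asserts:

import torch

inp = torch.rand(4, 4, 4, device='cuda')          # per-token scores
tgt = torch.randint(0, 4, (4, 4), device='cuda')  # token ids
msk = torch.rand(4, 4, device='cuda')             # soft mask

eager = LanguageModelCriterion()(inp, tgt, msk)
fused = LanguageModelCriterionNew()(inp, tgt, msk)
assert torch.allclose(eager, fused, atol=1e-5)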
Zhendong-Wang/arsm_image_captioning
LanguageModelCriterion
false
9,661
[ "MIT" ]
0
2282b76ab03b53952269d94d6c4b19ab98636ca5
https://github.com/Zhendong-Wang/arsm_image_captioning/tree/2282b76ab03b53952269d94d6c4b19ab98636ca5
MyEntropy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [logsoftmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # logsoftmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), 
xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/56/c56zvtoyao27obzv5t3wc5ghkyyf6yov7jqfbnej5tgl6xhbdwlv.py # Topologically Sorted Source Nodes: [logsoftmax, getitem, loss, mean], Original ATen: [aten._log_softmax, aten.index, aten.neg, aten.mean] # Source node to ATen node mapping: # getitem => index # logsoftmax => exp, log, sub_1, sum_1 # loss => neg # mean => mean # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%sub_1, [%iota_default, %arg1_1]), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%index,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {}) triton_per_fused__log_softmax_index_mean_neg_1 = async_compile.triton('triton_per_fused__log_softmax_index_mean_neg_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_index_mean_neg_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_index_mean_neg_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = (rindex // 16) r0 = rindex % 16 tmp0 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp9 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (r0 + (16*tmp4) + (64*r1)), None) tmp8 = tl_math.exp(tmp7) tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp6 - tmp18 tmp20 = -tmp19 tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK]) tmp23 = tl.sum(tmp21, 1)[:, None] tmp24 = 64.0 tmp25 = tmp23 / tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logsoftmax], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [logsoftmax, getitem, loss, mean], Original ATen: [aten._log_softmax, aten.index, aten.neg, aten.mean] triton_per_fused__log_softmax_index_mean_neg_1.run(buf2, arg1_1, buf0, 1, 64, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
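Reading the graph fragments above, the two kernels fuse a per-location cross-entropy: log-softmax over dim 1, gather each sample's target channel, negate, and take the mean. A hedged eager reconstruction (`my_entropy_reference` is a name introduced here; the shapes follow the (4, 4, 4, 4) benchmark harness):

import torch

def my_entropy_reference(x, target):
    # log-softmax over the class dimension (dim=1), as in the first kernel
    logp = torch.log_softmax(x, dim=1)
    # gather each sample's target channel, keeping the spatial dims
    picked = logp[torch.arange(x.size(0)), target]  # (N, H, W)
    # negate and average, as in the second (fused reduction) kernel
    return (-picked).mean()

x = torch.randn(4, 4, 4, 4)
t = torch.randint(0, 4, (4,))
print(my_entropy_reference(x, t))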
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_index_mean_neg_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 16 r0 = rindex % 16 tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp9 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (r0 + 16 * tmp4 + 64 * r1), None) tmp8 = tl_math.exp(tmp7) tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp6 - tmp18 tmp20 = -tmp19 tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK]) tmp23 = tl.sum(tmp21, 1)[:, None] tmp24 = 64.0 tmp25 = tmp23 / tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_index_mean_neg_1[grid(1)](buf2, arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, class MyEntropyNew(nn.Module): def __init__(self): super(MyEntropyNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
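As with the previous entry, a quick CUDA-only sanity sketch comparing the rewritten module against `my_entropy_reference`, the eager reconstruction above (shapes fixed to what `call` asserts):

import torch

x = torch.randn(4, 4, 4, 4, device='cuda')
t = torch.randint(0, 4, (4,), device='cuda')
assert torch.allclose(MyEntropyNew()(x, t), my_entropy_reference(x, t), atol=1e-5)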
atimashov/object_detection
MyEntropy
false
3,135
[ "MIT" ]
0
922cd88f429156fa4668c7d718b2665e4ab875fd
https://github.com/atimashov/object_detection/tree/922cd88f429156fa4668c7d718b2665e4ab875fd
BasicCNN1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3r/c3rslv2exiiurkywxt4oxifqli4zlmoxh26mfj7b2xmxbacdj7n6.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 96 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_8/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/wv/cwvtp6qflpb42kxrujmda5zselv7wvkz3fgp2tryo2ftsisaildr.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = (yindex // 32) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/cb/ccbdveqsgunhla4cfhtfjdkif3utkipupej6kcqiiji3bjat2x63.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # 
Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/nl/cnlxxnmf5ygykjzvjljgvug3jvcu7du2i7h7ssfgcz5bdrzqhq5k.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) % 32 x2 = (xindex // 1024) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (4096*x2)), None) tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (4096*x2)), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + (64*x1) + (4096*x2)), None) tmp5 = tl.load(in_ptr0 + (2080 + x0 + (64*x1) + (4096*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/na/cnaiponhu6kavyqptxmbfaxdb2osqwlqk74kcqgtnst5s3wwic5o.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4x/c4xqpnjemnncshp3uobexba3gggcnvtfyqm77kedjl5hqdw3frxf.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) % 16 x2 = (xindex // 1024) x3 = xindex tmp0 = 
tl.load(in_ptr0 + (x0 + (128*x1) + (4096*x2)), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (4096*x2)), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + (128*x1) + (4096*x2)), None) tmp5 = tl.load(in_ptr0 + (2112 + x0 + (128*x1) + (4096*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/sl/csldasgiybuk75pltndkezbj76bkzandkkdq65rbg7u7qjxpoaag.py # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # relu_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/uf/cufauwupgjdrp6mauwivuddmfomag5gobvtyuvy22h7zpdtrudh5.py # 
Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 128], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = (yindex // 8) y5 = yindex y4 = (yindex // 64) y6 = yindex % 64 tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2048 + x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2176 + x2 + (256*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + (64*x2) 
+ (8192*y4)), tmp16, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/4z/c4zkjrymdmkix2ew2ul2mfnu6lkblf6zlcrmopfi6cbx5ypowtyn.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_5 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_10 = async_compile.triton('triton_poi_fused_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 216 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (216, 2048), (2048, 1)) assert_size_stride(primals_9, (216, ), (1, )) assert_size_stride(primals_10, (10, 216), (216, 1)) assert_size_stride(primals_11, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) 
triton_poi_fused_0.run(primals_1, buf0, 96, 9, grid=grid(96, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 2048, 9, grid=grid(2048, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 1, 2048, 32)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf5, primals_2, 524288, grid=grid(524288), stream=stream0) del primals_2 buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.float32) buf7 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf5, buf6, buf7, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf9, primals_5, 262144, grid=grid(262144), stream=stream0) del primals_5 buf10 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_7.run(buf9, buf10, buf11, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf13, primals_7, 131072, grid=grid(131072), stream=stream0) del primals_7 buf14 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8) buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_9.run(buf13, buf14, buf15, 256, 128, grid=grid(256, 128), stream=stream0) buf16 = empty_strided_cuda((16, 216), (216, 1), torch.float32) # 
Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_8, (2048, 216), (1, 2048), 0), out=buf16) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] triton_poi_fused_relu_10.run(buf17, primals_9, 3456, grid=grid(3456), stream=stream0) del primals_9 buf18 = empty_strided_cuda((16, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf17, reinterpret_tensor(primals_10, (216, 10), (1, 216), 0), alpha=1, beta=1, out=buf18) del primals_11 return (buf18, buf0, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0), buf17, primals_10, primals_8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((216, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((216, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((10, 216), (216, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
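The fc1 weight of shape (216, 2048) suggests this network was sized for 32x32 inputs (128 channels x 4 x 4 after three 2x2 pools); with the 64x64 harness input, the flatten step splits each sample into four rows (128*8*8 = 4*2048), which is why `call` returns a (16, 10) tensor for a batch of 4. A hedged eager reconstruction inferred from the weight shapes (`BasicCNN1Sketch` is a name introduced here):

import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicCNN1Sketch(nn.Module):
    """Hedged reconstruction of BasicCNN1 from the parameter shapes in `call`."""
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)    # primals_1 / primals_2
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)   # primals_4 / primals_5
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)  # primals_6 / primals_7
        self.fc1 = nn.Linear(2048, 216)                # primals_8 / primals_9
        self.fc2 = nn.Linear(216, 10)                  # primals_10 / primals_11

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = F.max_pool2d(F.relu(self.conv3(x)), 2)
        x = x.view(-1, 2048)  # on a 64x64 input this yields 4 rows per sample
        x = F.relu(self.fc1(x))
        return self.fc2(x)

print(BasicCNN1Sketch()(torch.randn(4, 3, 64, 64)).shape)  # torch.Size([16, 10]), matching buf18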
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 % 32 x2 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 64 * x1 + 4096 * x2), None) tmp5 = tl.load(in_ptr0 + (2080 + x0 + 64 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 16 x2 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), None) tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, 
None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = yindex // 8 y5 = yindex y4 = yindex // 64 y6 = yindex % 64 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2048 + x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2176 + x2 + 256 * y0 + 4096 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 64 * x2 + 8192 * y4), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 3456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 216 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (216, 2048), (2048, 1)) assert_size_stride(primals_9, (216,), (1,)) assert_size_stride(primals_10, (10, 216), (216, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(96, 9)](primals_1, buf0, 96, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 
4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 1, 2048, 32)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_4[grid(524288)](buf5, primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.float32) buf7 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(131072)](buf5, buf6, buf7, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_6[grid(262144)](buf9, primals_5, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf10 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_7[grid(65536)](buf9, buf10, buf11, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_8[grid(131072)](buf13, primals_7, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf14 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8) buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. 
float32) triton_poi_fused_max_pool2d_with_indices_9[grid(256, 128)](buf13, buf14, buf15, 256, 128, XBLOCK=128, YBLOCK=2, num_warps=4, num_stages=1) buf16 = empty_strided_cuda((16, 216), (216, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_8, (2048, 216), (1, 2048), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_relu_10[grid(3456)](buf17, primals_9, 3456, XBLOCK =256, num_warps=4, num_stages=1) del primals_9 buf18 = empty_strided_cuda((16, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf17, reinterpret_tensor( primals_10, (216, 10), (1, 216), 0), alpha=1, beta=1, out=buf18) del primals_11 return (buf18, buf0, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, reinterpret_tensor(buf15, (16, 2048), (2048, 1 ), 0), buf17, primals_10, primals_8) class BasicCNN1New(nn.Module): def __init__(self): super().__init__() self.layer_names = ['conv1', 'conv2', 'conv3', 'fc1', 'output_layer'] self.conv1 = nn.Conv2d(3, 32, 3, padding=1) self.conv2 = nn.Conv2d(32, 64, 3, padding=1) self.conv3 = nn.Conv2d(64, 128, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(4 * 4 * 128, 216) self.output_layer = nn.Linear(216, 10) self.dropout = nn.Dropout(0.2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.output_layer.weight primals_11 = self.output_layer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
fnc11/CosDefence
BasicCNN1
false
10,119
[ "MIT" ]
0
94f451b7d4b36cb3b9fcc85098dae242f311532b
https://github.com/fnc11/CosDefence/tree/94f451b7d4b36cb3b9fcc85098dae242f311532b
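In the BasicCNN1 record above, the heavy operators (the three convolutions and the two linear layers) are dispatched through extern_kernels, while the generated Triton kernels cover layout permutes, fused bias-add + ReLU, and max-pooling with indices. As a minimal eager-mode sketch of what a kernel like triton_poi_fused_convolution_relu_4 computes per element (the convolution itself is launched with bias=None, so the kernel only folds in the per-channel bias and the ReLU); the helper name below is illustrative, not part of the record:

import torch

def fused_bias_relu_reference(conv_out: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # conv_out: (N, C, H, W) result of a bias-free convolution (buf4 above)
    # bias:     (C,) convolution bias (primals_2), broadcast over N, H, W
    # mirrors tmp2 = tmp0 + tmp1 followed by maximum(0, tmp2) in the kernel
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))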
PreNet
import torch import torch.nn as nn class PreNet(nn.Module): def __init__(self): super(PreNet, self).__init__() self.conv1 = nn.Conv2d(4, 8, 3, padding=1) self.act1 = nn.LeakyReLU(0.2, inplace=False) self.conv2 = nn.Conv2d(8, 16, 3, padding=1) self.act2 = nn.LeakyReLU(0.2, inplace=False) def forward(self, labels): conv1 = self.act1(self.conv1(labels)) conv2 = self.act2(self.conv2(conv1)) return conv2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(512)](buf0, primals_2, buf1, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 16, 4, 4), (256, 16, 4, 1)) buf4 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.bool) buf5 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32 ) triton_poi_fused_convolution_leaky_relu_1[grid(1024)](buf3, primals_5, buf4, buf5, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_5 return buf5, primals_1, primals_3, primals_4, buf1, buf2, buf4 class PreNetNew(nn.Module): def __init__(self): super(PreNetNew, self).__init__() self.conv1 = nn.Conv2d(4, 8, 3, padding=1) self.act1 = nn.LeakyReLU(0.2, inplace=False) self.conv2 = nn.Conv2d(8, 16, 3, padding=1) self.act2 = nn.LeakyReLU(0.2, inplace=False) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
karavik18/Federated_Learning_for_Missing_MRI_Sequence
PreNet
false
7,012
[ "Apache-2.0" ]
1
42924f8475f354e6b429d05867f99530aa485b96
https://github.com/karavik18/Federated_Learning_for_Missing_MRI_Sequence/tree/42924f8475f354e6b429d05867f99530aa485b96
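PreNetNew keeps the same submodule names and parameter shapes as the original PreNet, so the two can share a state dict, which allows a quick numerical sanity check of the compiled path. A minimal sketch, assuming both class definitions from this record are in scope and a CUDA device is available (the call graph is CUDA-only), using the same input shape as get_inputs():

import torch

eager = PreNet().cuda()
compiled = PreNetNew().cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameter names and shapes

x = torch.rand([4, 4, 4, 4], device='cuda')
with torch.no_grad():
    torch.testing.assert_close(eager(x), compiled(x))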
LigthSpeechLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ox/coxh4zxlwehvc7dn52ckdepn62ln2jmlbsk5mwd7tp7xur2tywk6.py # Topologically Sorted Source Nodes: [mel_loss], Original ATen: [aten.mse_loss] # Source node to ATen node mapping: # mel_loss => mean, pow_1, sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) triton_per_fused_mse_loss_0 = async_compile.triton('triton_per_fused_mse_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 
256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bq/cbqlvx3fqrosdwrna6ssfaprt3lceueeyt3u5o4low2mfdmh2ur6.py # Topologically Sorted Source Nodes: [similarity_loss], Original ATen: [aten.sub, aten.abs, aten.mean] # Source node to ATen node mapping: # similarity_loss => abs_1, mean_1, sub_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg2_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) triton_per_fused_abs_mean_sub_1 = async_compile.triton('triton_per_fused_abs_mean_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mel_loss], Original ATen: [aten.mse_loss] stream0 = get_raw_stream(0) triton_per_fused_mse_loss_0.run(buf3, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [similarity_loss], Original ATen: [aten.sub, aten.abs, aten.mean] triton_per_fused_abs_mean_sub_1.run(buf4, arg3_1, arg2_1, 1, 256, grid=grid(1), stream=stream0) del arg2_1 del arg3_1 buf2 = empty_strided_cuda((), (), torch.float32) buf5 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [duration_loss], Original ATen: [aten.sub, aten.abs, aten.mean] triton_per_fused_abs_mean_sub_1.run(buf5, arg5_1, arg4_1, 1, 256, grid=grid(1), stream=stream0) del arg4_1 del arg5_1 return (buf3, buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg5_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) @triton.jit def triton_per_fused_abs_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_0[grid(1)](buf3, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf4 = buf1 del buf1 triton_per_fused_abs_mean_sub_1[grid(1)](buf4, arg3_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg2_1 del arg3_1 buf2 = empty_strided_cuda((), (), torch.float32) buf5 = buf2 del buf2 triton_per_fused_abs_mean_sub_1[grid(1)](buf5, arg5_1, arg4_1, 1, 256, num_warps=2, num_stages=1) del arg4_1 del arg5_1 return buf3, buf4, buf5 class LigthSpeechLossNew(nn.Module): """ LigthSpeech Loss """ def __init__(self): super(LigthSpeechLossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3, input_4, input_5): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 arg4_1 = input_4 arg5_1 = input_5 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1]) return output[0], output[1], output[2]
entn-at/LightSpeech
LigthSpeechLoss
false
6,654
[ "MIT" ]
1
48250fbcede4b258ba13ab17e3e83afc5fe85a01
https://github.com/entn-at/LightSpeech/tree/48250fbcede4b258ba13ab17e3e83afc5fe85a01
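The three persistent reductions in the LigthSpeechLoss record are one mean-squared-error term and two mean-absolute-error terms over 4x4x4x4 inputs (256 elements each, hence the division by 256.0). Written out in eager PyTorch, following the source-node comments (mel_loss, similarity_loss, duration_loss) and the argument order in call(); since both MSE and mean absolute error are symmetric in their operands, which tensor of each pair is the prediction and which the target cannot be recovered from the record, so the parameter names below are assumptions:

import torch
import torch.nn.functional as F

def light_speech_losses(mel, mel_target, sim, sim_target, dur, dur_target):
    # triton_per_fused_mse_loss_0: mean of the squared difference
    mel_loss = F.mse_loss(mel_target, mel)
    # triton_per_fused_abs_mean_sub_1, launched twice with different inputs
    similarity_loss = (sim_target - sim).abs().mean()
    duration_loss = (dur_target - dur).abs().mean()
    return mel_loss, similarity_loss, duration_loss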
myEncoder
import torch import torch.nn.functional as F class myEncoder(torch.nn.Module): def __init__(self, fomSize, romSize): super(myEncoder, self).__init__() self.fc1 = torch.nn.Linear(fomSize, 200) self.fc2 = torch.nn.Linear(200, 64) self.fc3 = torch.nn.Linear(64, romSize) def forward(self, x): x = self.fc1(x) x = F.elu(x) x = self.fc2(x) x = F.elu(x) x = self.fc3(x) x = F.elu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'fomSize': 4, 'romSize': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_elu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 200), (200, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(12800)](buf0, buf1, 12800, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 64), (1, 200), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch. 
float32) triton_poi_fused_elu_1[grid(4096)](buf2, buf3, 4096, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_elu_2[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 200), (200, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 64), (64, 1), 0 ), buf4, primals_6, primals_4 class myEncoderNew(torch.nn.Module): def __init__(self, fomSize, romSize): super(myEncoderNew, self).__init__() self.fc1 = torch.nn.Linear(fomSize, 200) self.fc2 = torch.nn.Linear(200, 64) self.fc3 = torch.nn.Linear(64, romSize) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Pressio/pressio4py
myEncoder
false
17,834
[ "Unlicense", "BSD-3-Clause" ]
4
36676dbd112a7c7960ccbf302ff14d4376c819ec
https://github.com/Pressio/pressio4py/tree/36676dbd112a7c7960ccbf302ff14d4376c819ec
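All three triton_poi_fused_elu_* kernels in the myEncoder record implement the same elementwise ELU with alpha = 1.0 (the tmp3 = 1.0 multipliers); they differ only in problem size and masking. The per-element math, as a short eager sketch with an illustrative helper name:

import torch

def elu_reference(x: torch.Tensor) -> torch.Tensor:
    # tmp7 = where(x > 0, x, expm1(x)) in the kernels, i.e. the standard
    # ELU that F.elu(x) computes with its default alpha of 1.0
    return torch.where(x > 0, x, torch.expm1(x))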
ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3v/c3vyztzgw3iiqfspecgmyuijbvk6r675ogzi4x5mnjsc4kdyk3gp.py # Topologically Sorted Source Nodes: [distances], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul] # Source node to ATen node mapping: # distances => clamp_min, clamp_min_1, div, div_1, mul, pow_1, pow_2, pow_3, pow_4, sum_1, sum_2 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-08), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %clamp_min), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %clamp_min_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div), kwargs = {}) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (x3), xmask) tmp17 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + (x3), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/26/c26z3t2s2o7fbb3srtzysccunz5ychms4ztfx6xensu3es5gjxgt.py # Topologically Sorted Source Nodes: [distances], Original ATen: [aten.sum] # Source node to ATen node mapping: # distances => sum_3 # Graph fragment: # %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_sum_1 = async_compile.triton('triton_poi_fused_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ur/curaeceqtciv33kgsrtwhpmo4qktwur2c3s37rzlprbqh72g36v2.py # Topologically Sorted Source Nodes: [sub, sub_1, relu, pow_1, mul, sub_2, pow_2, mul_1, truediv, losses, mean], Original ATen: [aten.rsub, aten.relu, aten.pow, aten.mul, aten.div, aten.add, aten.mean] # Source node to ATen node mapping: # losses => add # mean => mean # mul => mul_1 # mul_1 => mul_2 # pow_1 => pow_5 # pow_2 => pow_6 # relu => relu # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # truediv => div_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.5, %sum_3), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %pow_5), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sum_3), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %pow_6), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, 4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %div_2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {}) triton_per_fused_add_div_mean_mul_pow_relu_rsub_2 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_relu_rsub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers 
import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_relu_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_pow_relu_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + (r2), None) tmp3 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = 0.5 tmp5 = tmp4 - tmp3 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 * tmp7 tmp9 = tmp2 * tmp8 tmp10 = tmp1 - tmp3 tmp11 = tmp10 * tmp10 tmp12 = tmp0 * tmp11 tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp9 + tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = 256.0 tmp20 = tmp18 / tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [distances], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [distances], Original ATen: [aten.sum] triton_poi_fused_sum_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [sub, sub_1, relu, pow_1, mul, sub_2, pow_2, mul_1, truediv, losses, mean], Original ATen: [aten.rsub, aten.relu, aten.pow, aten.mul, aten.div, aten.add, aten.mean] 
triton_per_fused_add_div_mean_mul_pow_relu_rsub_2.run(buf3, arg2_1, buf1, 1, 256, grid=grid(1), stream=stream0) del arg2_1 return (buf3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import CosineSimilarity assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x3, xmask) tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + x3, tmp31, xmask) @triton.jit def triton_poi_fused_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_pow_relu_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = 0.5 tmp5 = tmp4 - tmp3 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tmp7 * tmp7 tmp9 = tmp2 * tmp8 tmp10 = tmp1 - tmp3 tmp11 = tmp10 * tmp10 tmp12 = tmp0 * tmp11 tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp9 + tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = 256.0 tmp20 = tmp18 / tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + 
tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)]( arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_sum_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_div_mean_mul_pow_relu_rsub_2[grid(1)](buf3, arg2_1, buf1, 1, 256, num_warps=2, num_stages=1) del arg2_1 return buf3, buf1 class ContrastiveLossNew(nn.Module): """ Contrastive loss. Takes embeddings of two samples and a target label: 1 if the samples are from the same class, 0 otherwise. """ def __init__(self, margin=0.5): super(ContrastiveLossNew, self).__init__() self.margin = margin self.distance = CosineSimilarity() self.eps = 1e-06 self.mse = torch.nn.MSELoss() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
elloworl/FRMiner2.0
ContrastiveLoss
false
10,020
[ "MIT" ]
0
f596530d18512a1b1b8b8d56772f006f9f53f429
https://github.com/elloworl/FRMiner2.0/tree/f596530d18512a1b1b8b8d56772f006f9f53f429
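The three kernels in the ContrastiveLoss record compose a cosine-similarity contrastive loss: the first normalizes both inputs along dim 1 (clamping each norm at 1e-08) and multiplies them, the second sums over dim 1 to finish the cosine similarity, and the third applies the margin-0.5 hinge described in the graph fragments. An eager sketch with illustrative parameter names:

import torch
import torch.nn.functional as F

def contrastive_reference(x1, x2, target, margin=0.5):
    # kernels 0 and 1: cosine similarity along dim 1
    d = F.cosine_similarity(x2, x1, dim=1, eps=1e-08)
    # kernel 2: (1 - target) * relu(margin - d)**2 + target * (1 - d)**2 / 4,
    # with d (shape (4, 4, 4)) broadcasting against target (shape (4, 4, 4, 4))
    losses = (1 - target) * F.relu(margin - d) ** 2 + target * (1 - d) ** 2 / 4
    return losses.mean()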