Dataset schema (column: dtype, value range):

entry_point:                 string, length 1 to 65
original_triton_python_code: string, length 208 to 619k
optimised_triton_code:       string, length 1.15k to 275k
repo_name:                   string, length 7 to 115
module_name:                 string, length 1 to 65
synthetic:                   bool, 1 class
uuid:                        int64, 0 to 18.5k
licenses:                    list, length 1 to 6
stars:                       int64, 0 to 19.8k
sha:                         string, length 40 to 40
repo_link:                   string, length 72 to 180
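Each record below pairs the Inductor-compiled Triton code for a PyTorch module (original_triton_python_code) with a cleaned-up standalone version (optimised_triton_code), plus provenance metadata. A minimal sketch of iterating the records with the Hugging Face datasets library; the file name triton_pairs.jsonl is a hypothetical local export, not a path given by this dataset:

from datasets import load_dataset

# Hypothetical local export; substitute the real dataset path or Hub id.
ds = load_dataset("json", data_files="triton_pairs.jsonl", split="train")

for row in ds.select(range(3)):
    # Field names follow the schema above.
    print(row["module_name"], row["repo_name"], row["stars"])
    print(len(row["original_triton_python_code"]),
          len(row["optimised_triton_code"]))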
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/nu/cnuyiboekdklervsd4zozxy6hj5ypmcigwk6x427x6ylotbbcr5k.py # Topologically Sorted Source Nodes: [mean, std, sub, add, x, mul, x_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mean => mean # mul => mul # std => var # sub => sub # x => div # x_1 => add_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [1]), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [1]), kwargs = {correction: 1.0}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %view_1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_5), kwargs = {}) triton_per_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_std_sub_0', 
'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp28 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-08 tmp25 = tmp23 + tmp24 tmp26 = tmp0 - tmp20 tmp27 = tmp26 / tmp25 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp25, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = buf0; del buf0 # reuse buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf3 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, add, x, mul, x_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_div_mean_mul_std_sub_0.run(buf1, buf5, primals_1, primals_2, primals_3, buf6, 4, 64, grid=grid(4), stream=stream0) del primals_2 del primals_3 return (buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0), buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) 
fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp28 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-08 tmp25 = tmp23 + tmp24 tmp26 = tmp0 - tmp20 tmp27 = tmp26 / tmp25 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp25, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = buf0 del buf0 buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_mul_std_sub_0[grid(4)](buf1, buf5, primals_1, primals_2, primals_3, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0), buf5 class LayerNormNew(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNormNew, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
kangzhiq/DeepFillv2_Pytorch
LayerNorm
false
10,429
[ "MIT" ]
0
9c7ed61b25bb995713f89108b712490737abe1b1
https://github.com/kangzhiq/DeepFillv2_Pytorch/tree/9c7ed61b25bb995713f89108b712490737abe1b1
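This record carries only the compiled artifacts, not the eager module. A plausible reconstruction of the source LayerNorm, inferred from the ATen graph fragment (per-sample mean and unbiased std over the flattened features, eps added to the std, affine gamma/beta) and from LayerNormNew.__init__; treat it as a sketch, not the verbatim upstream code:

import torch
import torch.nn as nn
from torch.nn import Parameter


class LayerNorm(nn.Module):
    # Reconstructed sketch, not the verbatim upstream module.

    def __init__(self, num_features, eps=1e-08, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            self.gamma = Parameter(torch.Tensor(num_features).uniform_())
            self.beta = Parameter(torch.zeros(num_features))

    def forward(self, x):
        # Per-sample statistics over all non-batch elements (view to (N, -1)),
        # matching aten.mean and aten.var with correction=1.0 in the graph.
        shape = [-1] + [1] * (x.dim() - 1)
        mean = x.view(x.size(0), -1).mean(1).view(*shape)
        std = x.view(x.size(0), -1).std(1).view(*shape)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x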
RMSE_log
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ad/cadjhctvyfojnn7diqrt33hosai3lxiqzixa3xwfvqphtub2drtg.py # Topologically Sorted Source Nodes: [log, log_1, sub, abs_1, pow_1, mean, loss], Original ATen: [aten.log, aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt] # Source node to ATen node mapping: # abs_1 => abs_1 # log => log # log_1 => log_1 # loss => sqrt # mean => mean # pow_1 => pow_1 # sub => sub # Graph fragment: # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {}) triton_per_fused_abs_log_mean_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_abs_log_mean_pow_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_log_mean_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 
'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_log_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = libdevice.sqrt(tmp11) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [log, log_1, sub, abs_1, pow_1, mean, loss], Original ATen: [aten.log, aten.sub, aten.abs, aten.pow, aten.mean, aten.sqrt] stream0 = get_raw_stream(0) triton_per_fused_abs_log_mean_pow_sqrt_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_log_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = libdevice.sqrt(tmp11) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_log_mean_pow_sqrt_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class RMSE_logNew(nn.Module): def __init__(self): super(RMSE_logNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Khoronus/MonoDepth-FPN-PyTorch
RMSE_log
false
717
[ "MIT" ]
0
6e41e297723d1490c537e04afff905c61d6f0ff8
https://github.com/Khoronus/MonoDepth-FPN-PyTorch/tree/6e41e297723d1490c537e04afff905c61d6f0ff8
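Again only the compiled code is stored. Reading the graph fragment back into eager PyTorch gives the usual log-RMSE depth loss; a sketch under that reading, with the fake/real parameter names taken from the argument order in the kernel:

import torch
import torch.nn as nn


class RMSE_log(nn.Module):
    # Reconstructed sketch: loss = sqrt(mean(|log(real) - log(fake)|^2)).

    def forward(self, fake, real):
        return torch.sqrt(torch.mean(torch.abs(torch.log(real) -
            torch.log(fake)) ** 2))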
MSELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/7p/c7po76azmxaenhqdhxmjkwmmyrh5jpr4p5zydgfurvfy3q5qm6gp.py # Topologically Sorted Source Nodes: [mse_loss, loss_cls], Original ATen: [aten.mse_loss, aten.mul] # Source node to ATen node mapping: # loss_cls => mul # mse_loss => mean, pow_1, sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mse_loss_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False} ) @triton.jit def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mse_loss, loss_cls], Original ATen: [aten.mse_loss, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MSELossNew(nn.Module): """MSE loss. Args: reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". Defaults to 'mean'. loss_weight (float): Weight of the loss. Defaults to 1.0. """ def __init__(self, reduction='mean', loss_weight=1.0, loss_name='mse_loss' ): super().__init__() self.reduction = reduction self.loss_weight = loss_weight self.loss_name = loss_name self.cls_criterion = nn.MSELoss(reduction=reduction) def get_name(self): return 'MSE_loss' def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
youqingxiaozhua/ABAW3
MSELoss
false
11,057
[ "Apache-2.0" ]
0
51ab58ab311ecd6603a8485a45af0dcc39880e69
https://github.com/youqingxiaozhua/ABAW3/tree/51ab58ab311ecd6603a8485a45af0dcc39880e69
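The graph here is an MSE mean-reduction followed by multiplication with the constant 1.0, i.e. the loss_weight kept on MSELossNew. A sketch of the eager forward consistent with that; the pred/target parameter names are assumptions:

import torch.nn as nn


class MSELoss(nn.Module):
    # Reconstructed sketch; 'pred' and 'target' names are assumptions.

    def __init__(self, reduction='mean', loss_weight=1.0, loss_name='mse_loss'):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.loss_name = loss_name
        self.cls_criterion = nn.MSELoss(reduction=reduction)

    def forward(self, pred, target):
        # The compiled graph folds loss_weight in as the constant 1.0.
        return self.loss_weight * self.cls_criterion(pred, target)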
SE
import torch
import torch.nn as nn
import torch.nn.functional as F


def swish(input):
    return input * input.sigmoid()


class SE(nn.Module):

    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_channels, se_channels, kernel_size=1,
            bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels, kernel_size=1,
            bias=True)

    def forward(self, input):
        output = F.adaptive_avg_pool2d(input, (1, 1))
        output = swish(self.se1(output))
        output = self.se2(output).sigmoid()
        output = input * output
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'se_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_convolution_mul_sigmoid_1[grid(16)](buf3, primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf5 = 
extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_2[grid(16)](buf6, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf7, primals_1, primals_2, primals_4, buf1, buf3, buf4, buf6 def swish(input): return input * input.sigmoid() class SENew(nn.Module): def __init__(self, in_channels, se_channels): super(SENew, self).__init__() self.se1 = nn.Conv2d(in_channels, se_channels, kernel_size=1, bias=True ) self.se2 = nn.Conv2d(se_channels, in_channels, kernel_size=1, bias=True ) def forward(self, input_0): primals_2 = self.se1.weight primals_3 = self.se1.bias primals_4 = self.se2.weight primals_5 = self.se2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
CYHYCY/cifar10
SE
false
4,955
[ "Apache-2.0" ]
1
37254801045b76604a922884da87744aeb99b416
https://github.com/CYHYCY/cifar10/tree/37254801045b76604a922884da87744aeb99b416
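Because this record includes both the eager SE and the compiled SENew, their agreement can be checked directly. A hypothetical smoke test, assuming a CUDA device and both classes from this record in scope:

import torch

# Share weights between the eager and compiled modules, then compare outputs.
eager = SE(4, 4).cuda().eval()
compiled = SENew(4, 4).cuda().eval()
compiled.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)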
Model
import torch


class Model(torch.nn.Module):

    def __init__(self, D_in, D_out):
        super(Model, self).__init__()
        self.w1 = torch.nn.Parameter(torch.randn(D_in, D_out),
            requires_grad=True)
        self.sig = torch.nn.Sigmoid()

    def forward(self, x):
        y_pred = self.sig(x.mm(self.w1))
        return y_pred


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'D_in': 4, 'D_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(16)](buf1, 16, XBLOCK=16, num_warps =1, num_stages=1) return buf1, buf1, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class ModelNew(torch.nn.Module): def __init__(self, D_in, D_out): super(ModelNew, self).__init__() self.w1 = torch.nn.Parameter(torch.randn(D_in, D_out), requires_grad=True) self.sig = torch.nn.Sigmoid() def forward(self, input_0): primals_1 = self.w1 primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
Saran-nns/gradients
Model
false
1,018
[ "MIT" ]
0
67f9ff92589047828563dbbe30f225dca7ad47fd
https://github.com/Saran-nns/gradients/tree/67f9ff92589047828563dbbe30f225dca7ad47fd
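Here the matmul is dispatched to cuBLAS via extern_kernels.mm and the sigmoid is then applied in place on the same buffer (in_out_ptr0). The eager equivalent of that buffer reuse, shown only for reference:

import torch


def model_forward(x, w1):
    # mm via cuBLAS, then an in-place sigmoid on the result buffer, mirroring
    # extern_kernels.mm followed by triton_poi_fused_sigmoid_0.
    buf = x.mm(w1)
    return buf.sigmoid_()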
EmissionModel
import torch
import torch.utils.data


class EmissionModel(torch.nn.Module):
    """
    - forward(): computes the log probability of an observation.
    - sample(): given a state, sample an observation for that state.
    """

    def __init__(self, N, M):
        super(EmissionModel, self).__init__()
        self.N = N
        self.M = M
        self.unnormalized_emission_matrix = torch.nn.Parameter(torch.randn(
            N, M))

    def forward(self, x_t):
        """
        x_t : LongTensor of shape (batch size)
        Get observation probabilities
        """
        emission_matrix = torch.nn.functional.log_softmax(self.
            unnormalized_emission_matrix, dim=1)
        out = emission_matrix[:, x_t].transpose(0, 1)
        return out


def get_inputs():
    return [torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {'N': 4, 'M': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp8 = tl_math.exp(tmp7) tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp6 - tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_index_1[grid(16)](primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 return reinterpret_tensor(buf1, (4, 4), (1, 4), 0), primals_1, primals_2 class EmissionModelNew(torch.nn.Module): """ - forward(): computes the log probability of an observation. - sample(): given a state, sample an observation for that state. 
""" def __init__(self, N, M): super(EmissionModelNew, self).__init__() self.N = N self.M = M self.unnormalized_emission_matrix = torch.nn.Parameter(torch.randn( N, M)) def forward(self, input_0): primals_1 = self.unnormalized_emission_matrix primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
dendisuhubdy/pytorch_HMM
EmissionModel
false
15,156
[ "Apache-2.0" ]
88
3235326027328e1b0377b17f9dad8fcc56a3668c
https://github.com/dendisuhubdy/pytorch_HMM/tree/3235326027328e1b0377b17f9dad8fcc56a3668c
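The two kernels in this record split a numerically stable log-softmax into two passes: subtract the per-row maximum, then subtract the log-sum-exp while gathering the observed columns x_t. An eager equivalent for reference:

import torch


def emission_logprobs(unnormalized, x_t):
    # Stable log-softmax over dim=1 (subtract the row max, then the
    # log-sum-exp), then gather the observed columns; the same two passes as
    # triton_poi_fused__log_softmax_0 and triton_poi_fused__log_softmax_index_1.
    shifted = unnormalized - unnormalized.max(dim=1, keepdim=True).values
    log_probs = shifted - shifted.exp().sum(dim=1, keepdim=True).log()
    return log_probs[:, x_t].transpose(0, 1)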
BertSelfOutput
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ai/cai32p2ssjvpyulvuzcicdszqe3thbavgxn4jeed6uatjnl7yq2s.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dt/cdtfyiepqseugx5m3udiopa26uo6fdp2fyvmfcoipxuqyqqnb2l6.py # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # hidden_states_2 => add_1, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mn/cmntyljhuirhsdjg2yosgzllpkpxqedxgoyk6gunquq2rf3kl7u5.py # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # hidden_states_2 => add_1, 
add_2, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf1, primals_3, primals_4, 256, grid=grid(256), stream=stream0) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_6 return (buf4, primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_3, primals_4, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1 class BertSelfOutputNew(nn.Module): def __init__(self, config, twin=False, merge=False): super().__init__() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if twin: self.dense0 = nn.Linear(config.hidden_size, config.hidden_size) self.dense1 = nn.Linear(config.hidden_size, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) if merge: self.act = ACT2FN[config.hidden_act] self.merge_layer = nn.Linear(config.hidden_size * 2, config. hidden_size) self.merge = True else: self.merge = False def forward(self, input_0, input_1): primals_3 = self.LayerNorm.weight primals_5 = self.LayerNorm.bias primals_2 = self.dense.weight primals_6 = self.dense.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
igor0/BLIP
BertSelfOutput
false
16,250
[ "BSD-3-Clause" ]
473
6d8c3f1e381ed23acb84c55b4adb80e74c08117a
https://github.com/igor0/BLIP/tree/6d8c3f1e381ed23acb84c55b4adb80e74c08117a
SpatialAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r3/cr3jyybnsmycovxduh7msitibikymwdrwokyrxcb4r43tskaegg7.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %permute_3), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = 
triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 + tmp0 tmp8 = tmp3 + tmp7 tmp9 = tmp5 + tmp8 tmp10 = 0.25 tmp11 = tmp9 * tmp10 tmp12 = tmp6 + tmp11 tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/go/cgofqcgduqrtcjakfd7uk3wkcrpwsqxispluihwsstry6ekodk2u.py # Topologically Sorted Source Nodes: [convolved, out], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # convolved => convolution # out => sigmoid # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 7, 7), (49, 49, 7, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [convolved], Original ATen: [aten.convolution] 
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 1, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [convolved, out], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 return (buf2, primals_2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 7, 7), (49, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 + tmp0 tmp8 = tmp3 + tmp7 tmp9 = tmp5 + tmp8 tmp10 = 0.25 tmp11 = tmp9 * tmp10 tmp12 = tmp6 + tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 7, 7), (49, 49, 7, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 1, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class SpatialAttentionNew(nn.Module): def __init__(self, in_channels): super().__init__() self.activation = nn.Sigmoid() self.maxpool = nn.MaxPool2d((1, in_channels)) self.avgpool = nn.AvgPool2d((1, in_channels)) self.conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7, padding=3) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
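A short sketch of what the fused pooling kernel computes, in eager terms: triton_poi_fused_add_0 collapses the channel dimension with max-over-channels plus mean-over-channels (the 0.25 factor) in a single pass, after which the 7x7 convolution and sigmoid run as usual. Shapes follow the asserts in call(); a CUDA device is assumed:

import torch

m = SpatialAttentionNew(in_channels=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = m(x)
# Eager equivalent of the fused kernel: per-pixel channel max + channel mean.
pooled = x.max(dim=1, keepdim=True).values + x.mean(dim=1, keepdim=True)
ref = torch.sigmoid(m.conv(pooled))
print(torch.allclose(out, ref, atol=1e-5))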
dqawami/openvino_training_extensions
SpatialAttention
false
15,218
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
AuxiliaryConvolutions
import torch from torch import nn import torch.nn.functional as F from itertools import product as product import torch.optim import torch.utils.data class AuxiliaryConvolutions(nn.Module): """ Additional convolutions to produce higher-level feature maps. """ def __init__(self): super(AuxiliaryConvolutions, self).__init__() self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0) self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. """ for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv7_feats): """ Forward propagation. :param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19) :return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2 """ out = F.relu(self.conv8_1(conv7_feats)) out = F.relu(self.conv8_2(out)) conv8_2_feats = out out = F.relu(self.conv9_1(out)) out = F.relu(self.conv9_2(out)) conv9_2_feats = out out = F.relu(self.conv10_1(out)) out = F.relu(self.conv10_2(out)) conv10_2_feats = out out = F.relu(self.conv11_1(out)) conv11_2_feats = F.relu(self.conv11_2(out)) return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats def get_inputs(): return [torch.rand([4, 1024, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from itertools import product as product import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = yindex // 1024 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None) tl.store(out_ptr0 + (y0 + 1024 * x2 + 4194304 * y1), tmp0, None) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 524288 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 1024 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 524288 * y1), tmp4, xmask) 
@triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 256 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 65536 * y1), tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 196 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 50176 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 196 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 50176 * y1), tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 144 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 
y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 36864 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 144 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 36864 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_4, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (128, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536, 1024), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(4096, 4096)](primals_3, buf0, 4096, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(131072, 9)](primals_4, buf1, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(32768, 9)](primals_8, buf2, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(32768, 9)](primals_12, buf3, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(32768, 9)](primals_16, buf4, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf5 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_3[grid(4194304)](buf6, primals_2, 4194304, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 512, 32, 32), (524288, 1, 16384, 512)) buf8 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32) buf9 = empty_strided_cuda((4, 512, 32, 32), (524288, 1, 16384, 512), torch.float32) triton_poi_fused_convolution_relu_4[grid(2048, 
1024)](buf7, primals_5, buf8, buf9, 2048, 1024, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1) del buf7 del primals_5 buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 32, 32), (131072, 1, 4096, 128)) del buf9 buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_5[grid(524288)](buf11, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf12 = extern_kernels.convolution(buf11, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf13 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) buf14 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_convolution_relu_6[grid(1024, 256)](buf12, primals_9, buf13, buf14, 1024, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf12 del primals_9 buf15 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 128, 16, 16), (32768, 1, 2048, 128)) del buf14 buf16 = buf15 del buf15 triton_poi_fused_convolution_relu_7[grid(131072)](buf16, primals_11, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf17 = extern_kernels.convolution(buf16, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 14, 14), (50176, 1, 3584, 256)) buf18 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1), torch.float32) buf19 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256), torch.float32) triton_poi_fused_convolution_relu_8[grid(1024, 196)](buf17, primals_13, buf18, buf19, 1024, 196, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf17 del primals_13 buf20 = extern_kernels.convolution(buf19, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 14, 14), (25088, 1, 1792, 128)) del buf19 buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_9[grid(100352)](buf21, primals_15, 100352, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 256, 12, 12), (36864, 1, 3072, 256)) buf23 = empty_strided_cuda((4, 256, 12, 12), (36864, 144, 12, 1), torch.float32) buf24 = empty_strided_cuda((4, 256, 12, 12), (36864, 1, 3072, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_10[grid(1024, 144) ](buf22, primals_17, buf23, buf24, 1024, 144, XBLOCK=32, YBLOCK =32, num_warps=4, num_stages=1) del buf22 del primals_17 return (buf8, buf13, buf18, buf23, primals_1, buf0, buf1, primals_6, buf2, primals_10, buf3, primals_14, buf4, buf6, buf8, buf11, buf13, buf16, buf18, buf21, buf24) class AuxiliaryConvolutionsNew(nn.Module): """ Additional convolutions to produce higher-level feature maps. 
""" def __init__(self): super(AuxiliaryConvolutionsNew, self).__init__() self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0) self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. """ for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, input_0): primals_1 = self.conv8_1.weight primals_2 = self.conv8_1.bias primals_4 = self.conv8_2.weight primals_5 = self.conv8_2.bias primals_6 = self.conv9_1.weight primals_7 = self.conv9_1.bias primals_8 = self.conv9_2.weight primals_9 = self.conv9_2.bias primals_10 = self.conv10_1.weight primals_11 = self.conv10_1.bias primals_12 = self.conv10_2.weight primals_13 = self.conv10_2.bias primals_14 = self.conv11_1.weight primals_15 = self.conv11_1.bias primals_16 = self.conv11_2.weight primals_17 = self.conv11_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1], output[2], output[3]
dee-walia20/SSD-Implementation-using-Pytorch
AuxiliaryConvolutions
false
6,644
[ "MIT" ]
1
2a7dcdcea2787f4bffd45f335819f08af2b525dd
https://github.com/dee-walia20/SSD-Implementation-using-Pytorch/tree/2a7dcdcea2787f4bffd45f335819f08af2b525dd
Gate
import torch import torch.nn as nn import torch.nn.functional as F class Gate(nn.Module): """Gate Unit g = sigmoid(x) out = (Wx) * g Note: the gate is the sigmoid of the input itself, not of the projection Wx. """ def __init__(self, input_size, dropout_rate=0.0): super(Gate, self).__init__() self.linear = nn.Linear(input_size, input_size, bias=False) self.dropout_rate = dropout_rate def forward(self, x): """ Args: x: batch * len * dim Output: res: batch * len * dim """ if self.dropout_rate: x = F.dropout(x, p=self.dropout_rate, training=self.training) x_proj = self.linear(x) gate = torch.sigmoid(x) return x_proj * gate def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_2 class GateNew(nn.Module): """Gate Unit g = sigmoid(x) out = (Wx) * g """ def __init__(self, input_size, dropout_rate=0.0): super(GateNew, self).__init__() self.linear = nn.Linear(input_size, input_size, bias=False) self.dropout_rate = dropout_rate def forward(self, input_0): primals_1 = self.linear.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
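A sketch confirming the fused path keeps the eager semantics (the projection gated by the sigmoid of the raw input, not of the projection), assuming a CUDA device; input_size=4 follows the asserts in call():

import torch

m = GateNew(input_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
# triton_poi_fused_mul_sigmoid_0 multiplies the mm result in place by sigmoid(input).
print(torch.allclose(m(x), m.linear(x) * torch.sigmoid(x), atol=1e-6))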
Raiselimit/TorchBlocks
Gate
false
5,751
[ "MIT" ]
1
a5baecb9a2470ff175087475630f2b7db3f7ef51
https://github.com/Raiselimit/TorchBlocks/tree/a5baecb9a2470ff175087475630f2b7db3f7ef51
CAT_TokenEmbedding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/rr/crr4uc5tissxvuw4bzh6fo6y7osbgp6kedo24pzjiyyorxmxyfkc.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy] # Source node to ATen node mapping: # pad => copy # Graph fragment: # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1, %slice_2), kwargs = {}) # %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%empty, %copy, 2, 1, 5), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %slice_7, 2, 0, 1), kwargs = {}) # %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 2, 5, 6), kwargs = {}) triton_poi_fused_copy_0 = async_compile.triton('triton_poi_fused_copy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 6 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex x1 = xindex tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + ((-4) + x1 + (4*y0)), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float("nan") tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + ((-20) + x1 + (4*y0)), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x1 + (4*y0)), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + ((-4) + x1 + (4*y0)), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + (6*x1)), tmp42, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6v/c6vcpvedkedg5f7dbt5warppc4dneicv65dm4nutaxergetxh3x5.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%slice_scatter_default_2, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': 
['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 10 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (10, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 1, 6), (6, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy] stream0 = get_raw_stream(0) triton_poi_fused_copy_0.run(primals_1, buf1, 6, 4, grid=grid(6, 4), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 10, 4), (40, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_3, 160, grid=grid(160), stream=stream0) del primals_3 return (reinterpret_tensor(buf3, (10, 4, 4), (4, 1, 40), 0), primals_2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((10, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 6 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex x1 = xindex tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + (-4 + x1 + 4 * y0), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float('nan') tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + (-20 + x1 + 4 * y0), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x1 + 4 * y0), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + (-4 + x1 + 4 * y0), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + 6 * x1), tmp42, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (10, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 1, 6), (6, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(6, 4)](primals_1, buf1, 6, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) 
assert_size_stride(buf2, (4, 10, 4), (40, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(160)](buf3, primals_3, 160, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf3, (10, 4, 4), (4, 1, 40), 0), primals_2, buf1 class CAT_TokenEmbeddingNew(nn.Module): def __init__(self, c_in=1, d_feature=10): super(CAT_TokenEmbeddingNew, self).__init__() padding = 1 if torch.__version__ >= '1.5.0' else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_feature, kernel_size=3, padding=padding, padding_mode='circular') for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, input_0): primals_2 = self.tokenConv.weight primals_3 = self.tokenConv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
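A sketch of what the copy kernel does, assuming a CUDA device: triton_poi_fused_copy_0 materializes the Conv1d's padding_mode='circular' as an explicit length-6 buffer before the unpadded extern convolution, and the final reinterpret_tensor permutes the (batch, channel, length) conv output to (channel, length, batch):

import torch
import torch.nn.functional as F

m = CAT_TokenEmbeddingNew().cuda()
x = torch.rand(4, 4, device='cuda')
out = m(x)  # (10, 4, 4)
# Circular pad by 1 on each side, as built by the copy kernel: (4, 1, 6).
padded = F.pad(x.unsqueeze(1), (1, 1), mode='circular')
ref = F.conv1d(padded, m.tokenConv.weight, m.tokenConv.bias)  # (4, 10, 4)
print(torch.allclose(out, ref.permute(1, 2, 0), atol=1e-5))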
mkmysk123456789/Informer2020
CAT_TokenEmbedding
false
7,250
[ "Apache-2.0" ]
1
ad4b895169a17db580aab6d2c09fd07e06c9b6fa
https://github.com/mkmysk123456789/Informer2020/tree/ad4b895169a17db580aab6d2c09fd07e06c9b6fa
InterProbCrossEntropyLoss
import torch import torch.utils.data class InterProbCrossEntropyLoss(torch.nn.Module): def __init__(self, in_features, num_classes): super(InterProbCrossEntropyLoss, self).__init__() self.in_features = in_features self.num_classes = num_classes self.fc = torch.nn.Linear(in_features, num_classes) def forward(self, x, target, mask=None): log_prob = self.fc(x).log_softmax(-1) loss = -(target * log_prob).sum(-1) if mask is not None: loss = loss * mask.view(-1) return loss def pack_init_args(self): args = {'in_features': self.in_features, 'num_classes': self. num_classes} return args def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp1 - tmp12 tmp14 = tmp0 * tmp13 tmp16 = tmp3 - tmp12 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp20 = tmp6 - tmp12 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp24 = tmp9 - tmp12 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tl.store(out_ptr0 + x0, tmp27, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](primals_4, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class InterProbCrossEntropyLossNew(torch.nn.Module): def __init__(self, in_features, num_classes): super(InterProbCrossEntropyLossNew, self).__init__() self.in_features = in_features self.num_classes = num_classes self.fc = torch.nn.Linear(in_features, num_classes) def pack_init_args(self): args = {'in_features': self.in_features, 'num_classes': self. num_classes} return args def forward(self, input_0, input_1): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
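A small sketch of the fused computation, assuming a CUDA device; the first kernel subtracts the row max (the stable log-softmax trick) and the second folds the log-sum-exp, the multiply by target, the sum over classes, and the negation into one pass. Note the compiled forward takes no mask argument, so only the unmasked path is exercised:

import torch

m = InterProbCrossEntropyLossNew(in_features=4, num_classes=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
loss = m(x, target)  # (4, 4, 4): one loss value per position, no reduction
ref = -(target * m.fc(x).log_softmax(-1)).sum(-1)
print(torch.allclose(loss, ref, atol=1e-5))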
tkc-morita/secl
InterProbCrossEntropyLoss
false
10,951
[ "MIT" ]
0
d0156cea4fd95ea5071126dbf076a6da69752a37
https://github.com/tkc-morita/secl/tree/d0156cea4fd95ea5071126dbf076a6da69752a37
BerhuLoss
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.hub class BerhuLoss(nn.Module): def __init__(self): super(BerhuLoss, self).__init__() self.name = 'Berhu' def forward(self, input, target, mask=None): assert input.shape == target.shape if mask is not None: input = input[mask] target = target[mask] diff = torch.abs(input - target) c = 0.2 * torch.max(diff) diff_square = (torch.square(diff) + torch.square(c)) / (2 * c) diff_square[diff <= c] = 0 diff_copy = diff.clone() diff_copy[diff_copy > c] = 0 diff_copy += diff_square loss = torch.mean(diff_copy) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.hub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_index_put_lift_fresh_max_mean_mul_pow_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp4, 0)) tmp7 = 0.2 tmp8 = tmp6 * tmp7 tmp9 = tmp3 > tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp9, tmp10, tmp3) tmp12 = tmp3 <= tmp8 tmp13 = tmp3 * tmp3 tmp14 = tmp8 * tmp8 tmp15 = tmp13 + tmp14 tmp16 = 2.0 tmp17 = tmp8 * tmp16 tmp18 = tmp15 / tmp17 tmp19 = tl.where(tmp12, tmp10, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.broadcast_to(tmp20, [RBLOCK]) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0)) tmp24 = 256.0 tmp25 = tmp23 / tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 get_raw_stream(0) triton_per_fused_abs_add_div_index_put_lift_fresh_max_mean_mul_pow_sub_0[ grid(1)](buf4, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, class BerhuLossNew(nn.Module): def __init__(self): super(BerhuLossNew, self).__init__() self.name = 'Berhu' def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
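In standard notation, the single persistent-reduction kernel above evaluates the reverse Huber (berHu) loss in one pass over all N = 256 elements; writing \(d_i = \mathrm{input}_i - \mathrm{target}_i\):

\[
c = 0.2\,\max_i |d_i|, \qquad
\mathcal{L} = \frac{1}{N}\sum_{i=1}^{N}
\begin{cases}
|d_i|, & |d_i| \le c,\\[4pt]
\dfrac{d_i^{2} + c^{2}}{2c}, & |d_i| > c,
\end{cases}
\]

where the two tl.where selects (tmp11 and tmp19) realize the two branches and the final tl.sum divided by 256.0 (tmp24) is the mean.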
appliedinnovation/fast-depth
BerhuLoss
false
1,458
[ "MIT" ]
0
4606b4d340ae416de94afed45bc767fe6f64bd67
https://github.com/appliedinnovation/fast-depth/tree/4606b4d340ae416de94afed45bc767fe6f64bd67
AttentionBranch
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/oi/coicd2ifdvyuyi5ulyhgr5myfbr3tlo7wxjhqh3revbeg2pr3g5k.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # y_1 => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zm/czmslukeffgdmmnc25pklpku4kieqy5lokt7g7r36bd2qo3eeeme.py # Topologically Sorted Source Nodes: [y_2, y_3, out], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # out => mul_1 # y_2 => convolution_1 # y_3 => sigmoid # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, %sigmoid), kwargs = {}) triton_poi_fused_convolution_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 
3, 1)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf1, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_2, y_3, out], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] triton_poi_fused_convolution_mul_sigmoid_1.run(buf3, primals_4, buf4, buf5, 256, grid=grid(256), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) return (buf6, primals_1, primals_2, primals_3, primals_5, primals_6, buf1, buf3, buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_mul_sigmoid_1[grid(256)](buf3, primals_4, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) return (buf6, primals_1, primals_2, primals_3, primals_5, primals_6, buf1, buf3, buf4, buf5) class AttentionBranchNew(nn.Module): """Attention Branch.""" def __init__(self, nf, k_size=3): super(AttentionBranchNew, self).__init__() self.k1 = nn.Conv2d(nf, nf, kernel_size=k_size, padding=(k_size - 1 ) // 2, bias=False) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) self.k2 = nn.Conv2d(nf, nf, 1) self.sigmoid = nn.Sigmoid() self.k3 = nn.Conv2d(nf, nf, 
kernel_size=k_size, padding=(k_size - 1 ) // 2, bias=False) self.k4 = nn.Conv2d(nf, nf, kernel_size=k_size, padding=(k_size - 1 ) // 2, bias=False) def forward(self, input_0): primals_1 = self.k1.weight primals_3 = self.k2.weight primals_4 = self.k2.bias primals_5 = self.k3.weight primals_6 = self.k4.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
grofit/traiNNer
AttentionBranch
false
15,461
[ "Apache-2.0" ]
78
12d006fd44ed304e4178839c53b1f3d95ca25dcb
https://github.com/grofit/traiNNer/tree/12d006fd44ed304e4178839c53b1f3d95ca25dcb
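A minimal eager-mode sketch of the forward pass that the compiled call() above implements, reconstructed from the traced graph; the name AttentionBranchRef is hypothetical, and shapes follow the (4, 4, 4, 4) test inputs.

import torch
import torch.nn as nn

class AttentionBranchRef(nn.Module):
    # Hypothetical eager equivalent of the compiled AttentionBranchNew graph:
    # out = k4(k3(x) * sigmoid(k2(lrelu(k1(x))))).
    def __init__(self, nf=4, k_size=3):
        super().__init__()
        pad = (k_size - 1) // 2
        self.k1 = nn.Conv2d(nf, nf, k_size, padding=pad, bias=False)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.k2 = nn.Conv2d(nf, nf, 1)
        self.k3 = nn.Conv2d(nf, nf, k_size, padding=pad, bias=False)
        self.k4 = nn.Conv2d(nf, nf, k_size, padding=pad, bias=False)

    def forward(self, x):
        y = self.lrelu(self.k1(x))        # conv + triton_poi_fused_leaky_relu_0
        gate = torch.sigmoid(self.k2(y))  # conv + fused sigmoid/mul kernel
        out = self.k3(x) * gate
        return self.k4(out)

x = torch.rand(4, 4, 4, 4)
print(AttentionBranchRef()(x).shape)  # torch.Size([4, 4, 4, 4])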
Ln_distance
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/by/cby5kj263blyqgiihr7dzulbjsup7h2tnto4rdtait5s4uy5oywr.py # Topologically Sorted Source Nodes: [d, abs_1, pow_1, sum_1, pow_2], Original ATen: [aten.sub, aten.abs, aten.pow, aten.sum] # Source node to ATen node mapping: # abs_1 => abs_1 # d => sub # pow_1 => pow_1 # pow_2 => pow_2 # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 4), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.25), kwargs = {}) triton_per_fused_abs_pow_sub_sum_0 = async_compile.triton('triton_per_fused_abs_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tmp3 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 0.25 tmp11 = libdevice.pow(tmp9, tmp10) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [d, abs_1, pow_1, sum_1, pow_2], Original ATen: [aten.sub, aten.abs, aten.pow, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_abs_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tmp3 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 0.25 tmp11 = libdevice.pow(tmp9, tmp10) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_pow_sub_sum_0[grid(4)](buf1, arg0_1, arg1_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class Ln_distanceNew(nn.Module): """If dims is None Compute across all dimensions except first""" def __init__(self, n, dim=None): super(Ln_distanceNew, self).__init__() self.n = n self.dim = dim def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
carla-recourse/CARLA
Ln_distance
false
15,000
[ "MIT" ]
140
e9bb3152598a94e700c38d7377825054959dcf48
https://github.com/carla-recourse/CARLA/tree/e9bb3152598a94e700c38d7377825054959dcf48
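For reference, an eager-mode sketch of the distance the fused reduction computes for n=4, reconstructed from the kernel (LnDistanceRef is a hypothetical name): a per-sample L4 norm of the difference, reduced over every non-batch dimension.

import torch
from torch import nn

class LnDistanceRef(nn.Module):
    # Hypothetical eager equivalent of the compiled Ln_distanceNew graph.
    def __init__(self, n, dim=None):
        super().__init__()
        self.n = n
        self.dim = dim

    def forward(self, x, y):
        # Default matches the docstring: all dimensions except the first.
        dim = self.dim if self.dim is not None else tuple(range(1, x.dim()))
        d = (x - y).abs().pow(self.n)
        return d.sum(dim=dim).pow(1.0 / self.n)

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(LnDistanceRef(n=4)(a, b).shape)  # torch.Size([4])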
Contract
import torch import torch.nn as nn import torch.utils.data class Contract(nn.Module): def __init__(self, gain=2): super().__init__() self.gain = gain def forward(self, x): N, C, H, W = x.size() s = self.gain x = x.view(N, C, H // s, s, W // s, s) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() return x.view(N, C * s * s, H // s, W // s) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex % 2 x4 = xindex // 2 y0 = yindex % 2 y1 = yindex // 2 % 2 y2 = yindex // 4 x6 = xindex y5 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 2 * x3 + 4 * y1 + 8 * x4 + 64 * y2), xmask & ymask) tl.store(out_ptr0 + (x6 + 16 * y5), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 2, 4, 2, 2), (64, 32, 16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0), class ContractNew(nn.Module): def __init__(self, gain=2): super().__init__() self.gain = gain def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Arui66/FPSAutomaticAiming
Contract
false
13,303
[ "Apache-2.0" ]
129
87674385d42b065b984b38a2ff59e7f2d4f07dc9
https://github.com/Arui66/FPSAutomaticAiming/tree/87674385d42b065b984b38a2ff59e7f2d4f07dc9
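A quick CPU usage sketch of the space-to-channel reshape both Contract versions perform (contract_ref is a hypothetical helper): with gain=2, each 2x2 spatial block is folded into the channel dimension, so (N, C, H, W) becomes (N, 4C, H/2, W/2).

import torch

def contract_ref(x, s=2):
    # Eager reference of Contract.forward, for sanity-checking shapes.
    N, C, H, W = x.size()
    x = x.view(N, C, H // s, s, W // s, s)
    x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
    return x.view(N, C * s * s, H // s, W // s)

x = torch.rand(4, 4, 4, 4)
print(contract_ref(x).shape)  # torch.Size([4, 16, 2, 2])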
Hidden2Normal
import torch class Hidden2Normal(torch.nn.Module): def __init__(self, hidden_dim): super(Hidden2Normal, self).__init__() self.linear = torch.nn.Linear(hidden_dim, 5) def forward(self, hidden_state): normal = self.linear(hidden_state) normal[:, 2] = 0.01 + 0.2 * torch.sigmoid(normal[:, 2]) normal[:, 3] = 0.01 + 0.2 * torch.sigmoid(normal[:, 3]) normal[:, 4] = 0.7 * torch.sigmoid(normal[:, 4]) return normal def get_inputs(): return [torch.rand([4, 5, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_copy_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 20 % 5 x0 = xindex % 20 x2 = xindex // 100 x3 = xindex tmp3 = tl.load(in_ptr0 + (40 + x0 + 100 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 0.2 tmp6 = tmp4 * tmp5 tmp7 = 0.01 tmp8 = tmp6 + tmp7 tmp10 = tl.where(tmp2, tmp8, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_select_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 % 4 x2 = xindex // 20 x3 = xindex tmp0 = tl.load(in_ptr0 + (60 + x0 + 5 * x1 + 100 * x2 + 100 * ((12 + x1 ) // 20)), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_add_copy_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 20 % 5 x3 = xindex // 100 x4 = xindex % 20 x6 = xindex tmp3 = tl.load(in_ptr0 + (x4 + 20 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + x6, xmask) tmp0 = x2 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 0.2 tmp6 = tmp4 * tmp5 tmp7 = 0.01 tmp8 = tmp6 + tmp7 tmp10 = tl.where(tmp2, tmp8, tmp9) tl.store(out_ptr0 + x6, tmp10, xmask) @triton.jit def triton_poi_fused_select_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 % 4 x2 = xindex // 20 x3 = xindex tmp0 = tl.load(in_ptr0 + (80 + x0 + 5 * x1 + 100 * x2 + 100 * ((16 + x1 ) // 20)), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_copy_mul_sigmoid_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 20 % 5 x3 = xindex // 100 x5 = xindex % 20 x6 = xindex tmp3 = tl.load(in_ptr0 + (x5 + 20 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + x6, xmask) tmp0 = x2 tmp1 = tl.full([1], 4, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 0.7 tmp6 = tmp4 * tmp5 tmp8 = tl.where(tmp2, tmp6, tmp7) tl.store(in_out_ptr0 + x6, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (5, 4), (4, 1)) assert_size_stride(primals_2, (5,), (1,)) assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((80, 5), (5, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), 
reinterpret_tensor(primals_1, (4, 5), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 5, 4, 5), (100, 20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_copy_mul_sigmoid_0[grid(400)](buf0, buf1, 400, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) triton_poi_fused_select_1[grid(80)](buf1, buf2, 80, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 5, 4, 5), (100, 20, 5, 1), torch.float32) triton_poi_fused_add_copy_mul_sigmoid_2[grid(400)](buf2, buf1, buf3, 400, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) triton_poi_fused_select_3[grid(80)](buf3, buf4, 80, XBLOCK=128, num_warps=4, num_stages=1) buf5 = buf1 del buf1 buf6 = buf5 del buf5 triton_poi_fused_copy_mul_sigmoid_4[grid(400)](buf6, buf4, buf3, 400, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf6, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 5), (100, 5, 1), 40), buf2, buf4 class Hidden2NormalNew(torch.nn.Module): def __init__(self, hidden_dim): super(Hidden2NormalNew, self).__init__() self.linear = torch.nn.Linear(hidden_dim, 5) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
JosephGesnouin/Asymmetrical-Bi-RNNs-to-encode-pedestrian-trajectories
Hidden2Normal
false
17,512
[ "MIT" ]
9
488924e938fc1674b5a0d2cb9f05178cad8de561
https://github.com/JosephGesnouin/Asymmetrical-Bi-RNNs-to-encode-pedestrian-trajectories/tree/488924e938fc1674b5a0d2cb9f05178cad8de561
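A small sanity sketch of the post-processing both Hidden2Normal versions apply to the linear head (run under no_grad purely for checking): channels 2 and 3 are squashed into (0.01, 0.21) and channel 4 into (0, 0.7), a bounded parameterization for a 2D normal's scales and correlation.

import torch

with torch.no_grad():
    linear = torch.nn.Linear(4, 5)
    normal = linear(torch.rand(4, 5, 4, 4))  # last dim 4 -> 5
    normal[:, 2] = 0.01 + 0.2 * torch.sigmoid(normal[:, 2])
    normal[:, 3] = 0.01 + 0.2 * torch.sigmoid(normal[:, 3])
    normal[:, 4] = 0.7 * torch.sigmoid(normal[:, 4])
    # Both checks should print tensor(True): sigmoid is strictly in (0, 1).
    print(normal[:, 2].min() > 0.01, normal[:, 4].max() < 0.7)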
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/qg/cqgejm4wp2tvoab2fpggn6ygzxekamtcj65undyfrstdf2jttwb4.py # Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # dice => div # intersection => sum_1 # mul => mul # mul_1 => mul_1 # sub => sub # sum_2 => sum_2 # sum_3 => sum_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp0, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 2.0 tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp13 + tmp14 tmp16 = tmp8 + tmp11 tmp17 = tmp16 + tmp14 tmp18 = tmp15 / tmp17 tmp19 = tmp14 - tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp0, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 2.0 tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp13 + tmp14 tmp16 = tmp8 + tmp11 tmp17 = tmp16 + tmp14 tmp18 = tmp15 / tmp17 tmp19 = tmp14 - tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class DiceLossNew(nn.Module): def __init__(self, weight=None, size_average=True): super(DiceLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Charbel199/Oil-Spill-Thickness-Estimation
DiceLoss
false
8,927
[ "MIT" ]
0
dd600f6da611461f3b8072389bc34e6285109246
https://github.com/Charbel199/Oil-Spill-Thickness-Estimation/tree/dd600f6da611461f3b8072389bc34e6285109246
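An eager-mode sketch of the scalar the persistent reduction above computes, reconstructed from the ATen graph (dice_loss_ref is a hypothetical name): a soft Dice loss over the flattened inputs with additive smoothing of 1.

import torch

def dice_loss_ref(pred, target, smooth=1.0):
    # 1 - (2 * intersection + smooth) / (sum(pred) + sum(target) + smooth)
    p, t = pred.reshape(-1), target.reshape(-1)
    intersection = (p * t).sum()
    return 1.0 - (2.0 * intersection + smooth) / (p.sum() + t.sum() + smooth)

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(dice_loss_ref(a, b))  # scalar loss; should match DiceLossNew on the same inputs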
FirstLSTMAmp
import torch import torch.nn as nn class FirstLSTMAmp(nn.Module): """ First LSTM amplifier branch. Parameters: ---------- in_features : int Number of input channels. out_features : int Number of output channels. """ def __init__(self, in_features, out_features): super(FirstLSTMAmp, self).__init__() mid_features = in_features // 4 self.fc1 = nn.Linear(in_features=in_features, out_features=mid_features ) self.activ = nn.ReLU(inplace=True) self.fc2 = nn.Linear(in_features=mid_features, out_features= out_features) def forward(self, x): x = self.fc1(x) x = self.activ(x) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x0 % 4 // 4) + 16 * ((4 * (x0 // 4 % 4) + x0 % 4) // 16)), xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, primals_2, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused_view_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, primals_4, buf4 class FirstLSTMAmpNew(nn.Module): """ First LSTM amplifier branch. Parameters: ---------- in_features : int Number of input channels. out_features : int Number of output channels. """ def __init__(self, in_features, out_features): super(FirstLSTMAmpNew, self).__init__() mid_features = in_features // 4 self.fc1 = nn.Linear(in_features=in_features, out_features=mid_features ) self.activ = nn.ReLU(inplace=True) self.fc2 = nn.Linear(in_features=mid_features, out_features= out_features) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
iofthetiger/pkuad
FirstLSTMAmp
false
6,898
[ "Apache-2.0" ]
1
07496d108c614c84be028f344830becc9cac8fe5
https://github.com/iofthetiger/pkuad/tree/07496d108c614c84be028f344830becc9cac8fe5
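A short usage sketch of the amplifier branch (hypothetical, CPU-only): with in_features=4 the bottleneck is in_features // 4 = 1 unit, which is why the compiled call() above routes activations through (64, 1) buffers.

import torch
import torch.nn as nn

fc1 = nn.Linear(4, 1)  # mid_features = in_features // 4 == 1
fc2 = nn.Linear(1, 4)
x = torch.rand(4, 4, 4, 4)
out = fc2(torch.relu(fc1(x)))  # both linears apply over the last dimension
print(out.shape)  # torch.Size([4, 4, 4, 4])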
DICELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bl/cblebqnk52ztwn7chut2o6zbqx2yl3jtniky25kwqrbzjg6e7444.py # Topologically Sorted Source Nodes: [intersection, intersection_1, intersection_2, add, den1, den1_1, den1_2, den2, den2_1, den2_2, add_1, add_2, truediv, dice, sum_7, truediv_1, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # den1 => mul_1 # den1_1 => sum_3 # den1_2 => sum_4 # den2 => mul_2 # den2_1 => sum_5 # den2_2 => sum_6 # dice => mul_3 # intersection => mul # intersection_1 => sum_1 # intersection_2 => sum_2 # loss => sub # sum_7 => sum_7 # truediv => div # truediv_1 => div_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %squeeze_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-08), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %squeeze), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_3, [1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %squeeze_1), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [2]), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_5, [1]), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, %sum_6), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %mul_3 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_7, 4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = (rindex // 4) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp3 = tl.load(in_ptr0 + (4 + r0 + (64*r1)), None) tmp4 = tl.load(in_ptr1 + (4 + r0 + (64*r1)), None) tmp7 = tl.load(in_ptr0 + (8 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr1 + (8 + r0 + (64*r1)), None) tmp11 = tl.load(in_ptr0 + (12 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr1 + (12 + r0 + (64*r1)), None) tmp15 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp18 = tl.load(in_ptr0 + (20 + r0 + (64*r1)), None) tmp19 = tl.load(in_ptr1 + (20 + r0 + (64*r1)), None) tmp22 = tl.load(in_ptr0 + (24 + r0 + (64*r1)), None) tmp23 = tl.load(in_ptr1 + (24 + r0 + (64*r1)), None) tmp26 = tl.load(in_ptr0 + (28 + r0 + (64*r1)), None) tmp27 = tl.load(in_ptr1 + (28 + r0 + (64*r1)), None) tmp31 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp32 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp34 = tl.load(in_ptr0 + (36 + r0 + (64*r1)), None) tmp35 = tl.load(in_ptr1 + (36 + r0 + (64*r1)), None) tmp38 = tl.load(in_ptr0 + (40 + r0 + (64*r1)), None) tmp39 = tl.load(in_ptr1 + (40 + 
r0 + (64*r1)), None) tmp42 = tl.load(in_ptr0 + (44 + r0 + (64*r1)), None) tmp43 = tl.load(in_ptr1 + (44 + r0 + (64*r1)), None) tmp47 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp48 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp50 = tl.load(in_ptr0 + (52 + r0 + (64*r1)), None) tmp51 = tl.load(in_ptr1 + (52 + r0 + (64*r1)), None) tmp54 = tl.load(in_ptr0 + (56 + r0 + (64*r1)), None) tmp55 = tl.load(in_ptr1 + (56 + r0 + (64*r1)), None) tmp58 = tl.load(in_ptr0 + (60 + r0 + (64*r1)), None) tmp59 = tl.load(in_ptr1 + (60 + r0 + (64*r1)), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp28 = tmp26 * tmp27 tmp29 = tmp25 + tmp28 tmp30 = tmp14 + tmp29 tmp33 = tmp31 * tmp32 tmp36 = tmp34 * tmp35 tmp37 = tmp33 + tmp36 tmp40 = tmp38 * tmp39 tmp41 = tmp37 + tmp40 tmp44 = tmp42 * tmp43 tmp45 = tmp41 + tmp44 tmp46 = tmp30 + tmp45 tmp49 = tmp47 * tmp48 tmp52 = tmp50 * tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp54 * tmp55 tmp57 = tmp53 + tmp56 tmp60 = tmp58 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp46 + tmp61 tmp63 = tmp0 * tmp0 tmp64 = tmp3 * tmp3 tmp65 = tmp63 + tmp64 tmp66 = tmp7 * tmp7 tmp67 = tmp65 + tmp66 tmp68 = tmp11 * tmp11 tmp69 = tmp67 + tmp68 tmp70 = tmp15 * tmp15 tmp71 = tmp18 * tmp18 tmp72 = tmp70 + tmp71 tmp73 = tmp22 * tmp22 tmp74 = tmp72 + tmp73 tmp75 = tmp26 * tmp26 tmp76 = tmp74 + tmp75 tmp77 = tmp69 + tmp76 tmp78 = tmp31 * tmp31 tmp79 = tmp34 * tmp34 tmp80 = tmp78 + tmp79 tmp81 = tmp38 * tmp38 tmp82 = tmp80 + tmp81 tmp83 = tmp42 * tmp42 tmp84 = tmp82 + tmp83 tmp85 = tmp77 + tmp84 tmp86 = tmp47 * tmp47 tmp87 = tmp50 * tmp50 tmp88 = tmp86 + tmp87 tmp89 = tmp54 * tmp54 tmp90 = tmp88 + tmp89 tmp91 = tmp58 * tmp58 tmp92 = tmp90 + tmp91 tmp93 = tmp85 + tmp92 tmp94 = tmp1 * tmp1 tmp95 = tmp4 * tmp4 tmp96 = tmp94 + tmp95 tmp97 = tmp8 * tmp8 tmp98 = tmp96 + tmp97 tmp99 = tmp12 * tmp12 tmp100 = tmp98 + tmp99 tmp101 = tmp16 * tmp16 tmp102 = tmp19 * tmp19 tmp103 = tmp101 + tmp102 tmp104 = tmp23 * tmp23 tmp105 = tmp103 + tmp104 tmp106 = tmp27 * tmp27 tmp107 = tmp105 + tmp106 tmp108 = tmp100 + tmp107 tmp109 = tmp32 * tmp32 tmp110 = tmp35 * tmp35 tmp111 = tmp109 + tmp110 tmp112 = tmp39 * tmp39 tmp113 = tmp111 + tmp112 tmp114 = tmp43 * tmp43 tmp115 = tmp113 + tmp114 tmp116 = tmp108 + tmp115 tmp117 = tmp48 * tmp48 tmp118 = tmp51 * tmp51 tmp119 = tmp117 + tmp118 tmp120 = tmp55 * tmp55 tmp121 = tmp119 + tmp120 tmp122 = tmp59 * tmp59 tmp123 = tmp121 + tmp122 tmp124 = tmp116 + tmp123 tmp125 = 1e-08 tmp126 = tmp62 + tmp125 tmp127 = tmp93 + tmp124 tmp128 = tmp127 + tmp125 tmp129 = tmp126 / tmp128 tmp130 = 2.0 tmp131 = tmp129 * tmp130 tmp132 = tl.broadcast_to(tmp131, [XBLOCK, RBLOCK]) tmp134 = tl.sum(tmp132, 1)[:, None] tmp135 = 0.25 tmp136 = tmp134 * tmp135 tmp137 = 1.0 tmp138 = tmp137 - tmp136 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp138, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [intersection, intersection_1, intersection_2, add, den1, den1_1, den1_2, den2, den2_1, den2_2, add_1, add_2, 
truediv, dice, sum_7, truediv_1, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0.run(buf4, arg0_1, arg1_1, 1, 16, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (4 + r0 + 64 * r1), None) tmp4 = tl.load(in_ptr1 + (4 + r0 + 64 * r1), None) tmp7 = tl.load(in_ptr0 + (8 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr1 + (8 + r0 + 64 * r1), None) tmp11 = tl.load(in_ptr0 + (12 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + (12 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp18 = tl.load(in_ptr0 + (20 + r0 + 64 * r1), None) tmp19 = tl.load(in_ptr1 + (20 + r0 + 64 * r1), None) tmp22 = tl.load(in_ptr0 + (24 + r0 + 64 * r1), None) tmp23 = tl.load(in_ptr1 + (24 + r0 + 64 * r1), None) tmp26 = tl.load(in_ptr0 + (28 + r0 + 64 * r1), None) tmp27 = tl.load(in_ptr1 + (28 + r0 + 64 * r1), None) tmp31 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp32 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp34 = tl.load(in_ptr0 + (36 + r0 + 64 * r1), None) tmp35 = tl.load(in_ptr1 + (36 + r0 + 64 * r1), None) tmp38 = tl.load(in_ptr0 + (40 + r0 + 64 * r1), None) tmp39 = tl.load(in_ptr1 + (40 + r0 + 64 * r1), None) tmp42 = tl.load(in_ptr0 + (44 + r0 + 64 * r1), None) tmp43 = tl.load(in_ptr1 + (44 + r0 + 64 * r1), None) tmp47 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp48 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp50 = tl.load(in_ptr0 + (52 + r0 + 64 * r1), None) tmp51 = tl.load(in_ptr1 + (52 + r0 + 64 * r1), None) tmp54 = tl.load(in_ptr0 + (56 + r0 + 64 * r1), None) tmp55 = tl.load(in_ptr1 + (56 + r0 + 64 * r1), None) tmp58 = tl.load(in_ptr0 + (60 + r0 + 64 * r1), None) tmp59 = tl.load(in_ptr1 + (60 + r0 + 64 * r1), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp28 = tmp26 * tmp27 tmp29 = tmp25 + tmp28 tmp30 = tmp14 + tmp29 tmp33 = tmp31 * tmp32 tmp36 = tmp34 * tmp35 tmp37 = tmp33 + tmp36 tmp40 = tmp38 * tmp39 tmp41 = tmp37 + tmp40 tmp44 = tmp42 * tmp43 tmp45 = tmp41 + tmp44 tmp46 = tmp30 + tmp45 tmp49 = tmp47 * tmp48 tmp52 = tmp50 * tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp54 * tmp55 tmp57 = tmp53 + tmp56 tmp60 = tmp58 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp46 + tmp61 tmp63 = tmp0 * tmp0 tmp64 = tmp3 * tmp3 tmp65 = tmp63 + tmp64 tmp66 = tmp7 * tmp7 tmp67 = tmp65 + tmp66 tmp68 = tmp11 * tmp11 tmp69 = tmp67 + tmp68 tmp70 = tmp15 * tmp15 tmp71 = tmp18 * tmp18 tmp72 = tmp70 + tmp71 tmp73 = tmp22 * tmp22 tmp74 = tmp72 + tmp73 tmp75 = tmp26 * tmp26 tmp76 = tmp74 + tmp75 tmp77 = tmp69 + tmp76 tmp78 = tmp31 * tmp31 tmp79 = tmp34 * tmp34 tmp80 = tmp78 + tmp79 tmp81 = tmp38 * tmp38 tmp82 = tmp80 + tmp81 tmp83 = tmp42 * tmp42 tmp84 = tmp82 + tmp83 tmp85 = tmp77 + 
tmp84 tmp86 = tmp47 * tmp47 tmp87 = tmp50 * tmp50 tmp88 = tmp86 + tmp87 tmp89 = tmp54 * tmp54 tmp90 = tmp88 + tmp89 tmp91 = tmp58 * tmp58 tmp92 = tmp90 + tmp91 tmp93 = tmp85 + tmp92 tmp94 = tmp1 * tmp1 tmp95 = tmp4 * tmp4 tmp96 = tmp94 + tmp95 tmp97 = tmp8 * tmp8 tmp98 = tmp96 + tmp97 tmp99 = tmp12 * tmp12 tmp100 = tmp98 + tmp99 tmp101 = tmp16 * tmp16 tmp102 = tmp19 * tmp19 tmp103 = tmp101 + tmp102 tmp104 = tmp23 * tmp23 tmp105 = tmp103 + tmp104 tmp106 = tmp27 * tmp27 tmp107 = tmp105 + tmp106 tmp108 = tmp100 + tmp107 tmp109 = tmp32 * tmp32 tmp110 = tmp35 * tmp35 tmp111 = tmp109 + tmp110 tmp112 = tmp39 * tmp39 tmp113 = tmp111 + tmp112 tmp114 = tmp43 * tmp43 tmp115 = tmp113 + tmp114 tmp116 = tmp108 + tmp115 tmp117 = tmp48 * tmp48 tmp118 = tmp51 * tmp51 tmp119 = tmp117 + tmp118 tmp120 = tmp55 * tmp55 tmp121 = tmp119 + tmp120 tmp122 = tmp59 * tmp59 tmp123 = tmp121 + tmp122 tmp124 = tmp116 + tmp123 tmp125 = 1e-08 tmp126 = tmp62 + tmp125 tmp127 = tmp93 + tmp124 tmp128 = tmp127 + tmp125 tmp129 = tmp126 / tmp128 tmp130 = 2.0 tmp131 = tmp129 * tmp130 tmp132 = tl.broadcast_to(tmp131, [XBLOCK, RBLOCK]) tmp134 = tl.sum(tmp132, 1)[:, None] tmp135 = 0.25 tmp136 = tmp134 * tmp135 tmp137 = 1.0 tmp138 = tmp137 - tmp136 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp138, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf4, arg0_1, arg1_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, class DICELossNew(nn.Module): def __init__(self): super(DICELossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Rehan-Ahmar/UNet-Zoo
DICELoss
false
14,300
[ "MIT" ]
345
630f9290d487fda828e7118a3d953575b27a2686
https://github.com/Rehan-Ahmar/UNet-Zoo/tree/630f9290d487fda828e7118a3d953575b27a2686
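A minimal eager sketch of the math the fused DICELoss kernel above computes, assuming the contiguous (4, 4, 4, 4) inputs asserted in call(); the name dice_loss_reference and the dim mapping (each of the kernel's 16 reduction slots is one (batch, last-dim) pair) are reconstructions, not part of the source:

import torch

def dice_loss_reference(pred, target, eps=1e-08):
    # The kernel's 16-term product/square chains sum over the two middle
    # dims, one chain per (batch, last-dim) slice.
    inter = (pred * target).sum(dim=(1, 2))
    denom = pred.pow(2).sum(dim=(1, 2)) + target.pow(2).sum(dim=(1, 2))
    dice = 2.0 * (inter + eps) / (denom + eps)
    # Kernel: sum the 16 per-slice scores, scale by 0.25, return 1 - result.
    return 1.0 - 0.25 * dice.sum()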
Aggregation
import torch from torch import nn from torch.nn import * class Aggregation(nn.Module): """ Aggregation layer for the Dueling architecture. https://arxiv.org/abs/1511.06581 This layer computes a Q function by combining an estimate of V with an estimate of the advantage. The advantage is normalized by subtracting the average advantage so that we can properly estimate both V and the advantage. """ def forward(self, value, advantages): return value + advantages - torch.mean(advantages, dim=1, keepdim=True) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = 4.0 tmp11 = tmp9 / tmp10 tmp12 = tmp2 - tmp11 tl.store(out_ptr0 + x3, tmp12, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class AggregationNew(nn.Module): """ Aggregation layer for the Dueling architecture. https://arxiv.org/abs/1511.06581 This layer computes a Q function by combining an estimate of V with an estimate of the advantage. The advantage is normalized by subtracting the average advantage so that we can properly estimate both V and the advantage. """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
kcorder/autonomous-learning-library
Aggregation
false
15,796
[ "MIT" ]
584
0266195fa47564e51a32087bc007bff6dda5e263
https://github.com/kcorder/autonomous-learning-library/tree/0266195fa47564e51a32087bc007bff6dda5e263
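The fused Aggregation kernel hard-codes the divisor 4.0 because dim=1 has size 4 in the asserted shapes. A small sanity sketch of the identity it preserves (the names here are illustrative, not from the source):

import torch

def aggregation_reference(value, advantages):
    # Q = V + A - mean(A) along dim=1, exactly as in Aggregation.forward.
    return value + advantages - advantages.mean(dim=1, keepdim=True)

v, a = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
q = aggregation_reference(v, a)
# Subtracting the mean advantage centers A, so V stays identifiable:
assert torch.allclose((q - v).mean(dim=1), torch.zeros(4, 4, 4), atol=1e-06)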
DuelingQnet
import random import torch import torch.nn as nn import torch.nn.functional as F class DuelingQnet(nn.Module): def __init__(self, actions=2): super(DuelingQnet, self).__init__() self.fc1 = nn.Linear(4, 128) self.fc_value = nn.Linear(128, 128) self.fc_adv = nn.Linear(128, 128) self.value = nn.Linear(128, 1) self.adv = nn.Linear(128, actions) def forward(self, x): x = F.relu(self.fc1(x)) v = F.relu(self.fc_value(x)) a = F.relu(self.fc_adv(x)) v = self.value(v) a = self.adv(a) a_avg = torch.mean(a) q = v + a - a_avg return q def sample_action(self, obs, epsilon): out = self.forward(obs) coin = random.random() if coin < epsilon: return random.randint(0, 1) else: return out.argmax().item() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import random import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_per_fused_add_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r2 = rindex // 2 tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp7 = tmp4 + tmp6 tmp8 = tmp7 + tmp0 tmp9 = 128.0 tmp10 = tmp3 / tmp9 tmp11 = tmp8 - tmp10 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp11, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 128), (128, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (1, 128), (128, 1)) assert_size_stride(primals_9, (1,), (1,)) assert_size_stride(primals_10, (2, 128), (128, 1)) assert_size_stride(primals_11, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf12 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf12, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 128), (1, 128), 0), out=buf3) buf4 = 
reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf11 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf4, primals_5, buf11, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 1), (1, 128), 0), out=buf5) buf6 = reinterpret_tensor(buf3, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf3 buf10 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf6, primals_7, buf10, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf7 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf6, (64, 128), (128, 1), 0), reinterpret_tensor(primals_10, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf7) del primals_11 buf9 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) triton_per_fused_add_mean_sub_1[grid(1)](buf7, buf5, primals_9, buf9, 1, 128, XBLOCK=1, num_warps=2, num_stages=1) del buf5 del buf7 del primals_9 return buf9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), reinterpret_tensor(buf4, (64, 128), (128, 1), 0 ), reinterpret_tensor(buf6, (64, 128), (128, 1), 0 ), primals_10, primals_8, buf10, primals_6, buf11, primals_4, buf12 class DuelingQnetNew(nn.Module): def __init__(self, actions=2): super(DuelingQnetNew, self).__init__() self.fc1 = nn.Linear(4, 128) self.fc_value = nn.Linear(128, 128) self.fc_adv = nn.Linear(128, 128) self.value = nn.Linear(128, 1) self.adv = nn.Linear(128, actions) def sample_action(self, obs, epsilon): out = self.forward(obs) coin = random.random() if coin < epsilon: return random.randint(0, 1) else: return out.argmax().item() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc_value.weight primals_5 = self.fc_value.bias primals_6 = self.fc_adv.weight primals_7 = self.fc_adv.bias primals_8 = self.value.weight primals_9 = self.value.bias primals_10 = self.adv.weight primals_11 = self.adv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
DeepHaeJoong/reinforcement-learning
DuelingQnet
false
9,033
[ "MIT" ]
0
63e3053e3209809e67e97d51adaf5f85ce3799ba
https://github.com/DeepHaeJoong/reinforcement-learning/tree/63e3053e3209809e67e97d51adaf5f85ce3799ba
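In the DuelingQnet graph, one relu+bias kernel is reused across all three hidden layers, and triton_per_fused_add_mean_sub_1 averages over all 128 advantage entries (64 flattened positions x 2 actions), matching torch.mean(a) with no dim argument. A hedged sketch of just that final aggregation step:

import torch

def dueling_q(v, a):
    # Scalar mean over every advantage entry (the kernel's / 128.0),
    # then the (..., 1) value head broadcasts across the action dim.
    return v + a - a.mean()

v = torch.rand(4, 4, 4, 1)
a = torch.rand(4, 4, 4, 2)
assert dueling_q(v, a).shape == (4, 4, 4, 2)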
Adder2D
from torch.autograd import Function import math import torch import torch.nn as nn from torch.autograd.function import Function def adder2d_function(X, W, stride=1, padding=0): n_filters, _d_filter, h_filter, w_filter = W.size() n_x, _d_x, h_x, w_x = X.size() h_out = (h_x - h_filter + 2 * padding) / stride + 1 w_out = (w_x - w_filter + 2 * padding) / stride + 1 h_out, w_out = int(h_out), int(w_out) X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x), h_filter, dilation=1, padding=padding, stride=stride).view(n_x, -1, h_out * w_out ) X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1) W_col = W.view(n_filters, -1) out = adder.apply(W_col, X_col) out = out.view(n_filters, h_out, w_out, n_x) out = out.permute(3, 0, 1, 2).contiguous() return out class adder(Function): @staticmethod def forward(ctx, W_col, X_col): ctx.save_for_backward(W_col, X_col) output = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1) return output @staticmethod def backward(ctx, grad_output): W_col, X_col = ctx.saved_tensors grad_W_col = ((X_col.unsqueeze(0) - W_col.unsqueeze(2)) * grad_output.unsqueeze(1)).sum(2) grad_W_col = grad_W_col / grad_W_col.norm(p=2).clamp(min=1e-12 ) * math.sqrt(W_col.size(1) * W_col.size(0)) / 5 grad_X_col = (-(X_col.unsqueeze(0) - W_col.unsqueeze(2)).clamp(-1, 1) * grad_output.unsqueeze(1)).sum(0) return grad_W_col, grad_X_col class Adder2D(nn.Module): def __init__(self, input_channel, output_channel, kernel_size, stride=1, padding=0, bias=False, quantize=False, weight_bits=8, sparsity=0): super(Adder2D, self).__init__() self.stride = stride self.padding = padding self.input_channel = input_channel self.output_channel = output_channel self.kernel_size = kernel_size self.adder = torch.nn.Parameter(nn.init.normal_(torch.randn( output_channel, input_channel, kernel_size, kernel_size))) self.bias = bias if bias: self.b = torch.nn.Parameter(nn.init.uniform_(torch.zeros( output_channel))) def forward(self, x): output = adder2d_function(x, self.adder, self.stride, self.padding) if self.bias: output += self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channel': 4, 'output_channel': 4, 'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.autograd import Function import math import torch.nn as nn from torch.autograd.function import Function assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_per_fused_abs_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x1 = xindex // 4 x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2), xmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tmp1 = -tmp0 tl.store(out_ptr0 + (x1 + 4 * y0), tmp1, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_2, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused_abs_sub_sum_1[grid(16)](primals_1, buf0, buf1, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(4, 4)](buf1, buf2, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) del buf1 return buf2, primals_1, reinterpret_tensor(buf0, (1, 64, 4), (256, 4, 1), 0 ) def adder2d_function(X, W, stride=1, padding=0): n_filters, _d_filter, h_filter, w_filter = W.size() n_x, _d_x, h_x, w_x = X.size() h_out = (h_x - h_filter + 2 * padding) / stride + 1 w_out = (w_x - w_filter + 2 * padding) / stride + 1 h_out, w_out = int(h_out), int(w_out) X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x), h_filter, dilation=1, padding=padding, stride=stride).view(n_x, -1, h_out * w_out ) X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1) W_col = W.view(n_filters, -1) out = adder.apply(W_col, X_col) out = out.view(n_filters, h_out, w_out, n_x) out = out.permute(3, 0, 1, 2).contiguous() return out class adder(Function): @staticmethod def forward(ctx, W_col, X_col): ctx.save_for_backward(W_col, X_col) output = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1) return output @staticmethod def backward(ctx, grad_output): W_col, X_col = ctx.saved_tensors grad_W_col = ((X_col.unsqueeze(0) - W_col.unsqueeze(2)) * grad_output.unsqueeze(1)).sum(2) grad_W_col = grad_W_col / grad_W_col.norm(p=2).clamp(min=1e-12 ) * math.sqrt(W_col.size(1) * W_col.size(0)) / 5 grad_X_col = (-(X_col.unsqueeze(0) - W_col.unsqueeze(2)).clamp(-1, 1) * grad_output.unsqueeze(1)).sum(0) return grad_W_col, grad_X_col class Adder2DNew(nn.Module): def __init__(self, input_channel, output_channel, kernel_size, stride=1, padding=0, bias=False, quantize=False, weight_bits=8, sparsity=0): super(Adder2DNew, self).__init__() self.stride = stride self.padding = padding self.input_channel = input_channel self.output_channel = output_channel self.kernel_size = kernel_size self.adder = torch.nn.Parameter(nn.init.normal_(torch.randn( output_channel, input_channel, kernel_size, kernel_size))) self.bias = bias if bias: self.b = torch.nn.Parameter(nn.init.uniform_(torch.zeros( output_channel))) def forward(self, input_0): primals_1 = self.adder primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
poppin-mice/ShiftAddNet
Adder2D
false
16,268
[ "MIT" ]
55
a17369a50da5bba6250fdeac7c065bd00f293f3c
https://github.com/poppin-mice/ShiftAddNet/tree/a17369a50da5bba6250fdeac7c065bd00f293f3c
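The Adder2D kernels implement AdderNet's replacement of the convolution dot product with a negative L1 distance: triton_per_fused_abs_sub_sum_1 is the |W - X| reduction and triton_poi_fused_clone_2 applies the leading minus sign. The eager core restated with illustrative shapes (the variable names follow adder.forward above):

import torch

def adder_output(W_col, X_col):
    # output[f, p] = -sum_k |W_col[f, k] - X_col[k, p]|
    return -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1)

W_col = torch.rand(4, 64)  # 4 filters, 4*4*4 unfolded weight entries each
X_col = torch.rand(64, 4)  # one unfolded column per batch element here
out = adder_output(W_col, X_col)
assert out.shape == (4, 4) and (out <= 0).all()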
DiceCoeffLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/jl/cjlrtm4sxygj4okzoufxsrs4ibv3mkxn7bvqn4f7vhcm3jj36mys.py # Topologically Sorted Source Nodes: [inter, mul, add_2, sum_1, sum_2, add, union, t], Original ATen: [aten.dot, aten.mul, aten.add, aten.sum, aten.div] # Source node to ATen node mapping: # add => add # add_2 => add_2 # inter => mul, sum_1 # mul => mul_1 # sum_1 => sum_2 # sum_2 => sum_3 # t => div # union => add_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 0.0001), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg0_1,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg1_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 0.0001), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %add_1), kwargs = {}) triton_per_fused_add_div_dot_mul_sum_0 = async_compile.triton('triton_per_fused_add_div_dot_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 
1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_dot_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_dot_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp0, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 2.0 tmp13 = tmp5 * tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp8 + tmp11 tmp17 = tmp16 + tmp14 tmp18 = tmp15 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [inter, mul, add_2, sum_1, sum_2, add, union, t], Original ATen: [aten.dot, aten.mul, aten.add, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_dot_mul_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_dot_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp0, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 2.0 tmp13 = tmp5 * tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp8 + tmp11 tmp17 = tmp16 + tmp14 tmp18 = tmp15 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_dot_mul_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class DiceCoeffLossNew(nn.Module): def __init__(self, eps: 'float'=0.0001): super(DiceCoeffLossNew, self).__init__() self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ZiyunClaudeWang/e3d
DiceCoeffLoss
false
18,224
[ "MIT" ]
9
2efd01167350c29423babb6233907fa54156268f
https://github.com/ZiyunClaudeWang/e3d/tree/2efd01167350c29423babb6233907fa54156268f
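Unlike the per-slice DICELoss earlier in this set, DiceCoeffLoss reduces both tensors globally (one dot product, two sums, one scalar out), and the traced graph returns the coefficient t itself rather than 1 - t. A hedged restatement of the kernel's arithmetic (dice_coeff_reference is an illustrative name):

import torch

def dice_coeff_reference(pred, target, eps=0.0001):
    # Single global reduction over all 256 elements, as in the fused kernel.
    inter = torch.dot(pred.reshape(-1), target.reshape(-1))
    union = pred.sum() + target.sum()
    return (2 * inter + eps) / (union + eps)

p, t = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
coeff = dice_coeff_reference(p, t)
assert coeff.ndim == 0  # scalar, matching the kernel's single output slot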
IIDIsotropicGaussianUVLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/57/c57fbdxlpuego6d4edvsufdvj2wgfjgio4escf24ktyiq3s7ahqd.py # Topologically Sorted Source Nodes: [softplus, sigma2, log, mul, add_2, sub, pow_1, sub_1, pow_2, delta_t_delta, truediv, add_3, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.log, aten.mul, aten.sub, aten.pow, aten.div, aten.sum] # Source node to ATen node mapping: # add_2 => add_2 # add_3 => add_3 # delta_t_delta => add_1 # log => log # loss => mul_1 # mul => mul # pow_1 => pow_1 # pow_2 => pow_2 # sigma2 => add # softplus => exp, gt, log1p, where # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # truediv => div # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 4), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, 2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1.8378770664093453), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg4_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add), kwargs = {}) # %add_3 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %div), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.5), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {}) triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp13 = tl.load(in_ptr1 + (r0), None) tmp14 = tl.load(in_ptr2 + (r0), None) tmp17 = tl.load(in_ptr3 + (r0), None) tmp18 = tl.load(in_ptr4 + (r0), None) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 4.0 tmp7 = tmp5 + tmp6 tmp8 = tl_math.log(tmp7) tmp9 = 2.0 tmp10 = tmp8 * tmp9 tmp11 = 1.8378770664093453 tmp12 = tmp10 + tmp11 tmp15 = tmp13 - tmp14 tmp16 = tmp15 * tmp15 tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp16 + tmp20 tmp22 = tmp21 / tmp7 tmp23 = tmp12 + tmp22 tmp24 = 0.5 tmp25 = tmp23 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 
1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [softplus, sigma2, log, mul, add_2, sub, pow_1, sub_1, pow_2, delta_t_delta, truediv, add_3, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.log, aten.mul, aten.sub, aten.pow, aten.div, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0.run(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp13 = tl.load(in_ptr1 + r0, None) tmp14 = tl.load(in_ptr2 + r0, None) tmp17 = tl.load(in_ptr3 + r0, None) tmp18 = tl.load(in_ptr4 + r0, None) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 4.0 tmp7 = tmp5 + tmp6 tmp8 = tl_math.log(tmp7) tmp9 = 2.0 tmp10 = tmp8 * tmp9 tmp11 = 1.8378770664093453 tmp12 = tmp10 + tmp11 tmp15 = tmp13 - tmp14 tmp16 = tmp15 * tmp15 tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp16 + tmp20 tmp22 = tmp21 / tmp7 tmp23 = tmp12 + tmp22 tmp24 = 0.5 tmp25 = tmp23 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1 , arg1_1, arg2_1, arg3_1, arg4_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 return buf0, class IIDIsotropicGaussianUVLossNew(nn.Module): """ Loss for the case of iid residuals with isotropic covariance: $Sigma_i = sigma_i^2 I$ The loss (negative log likelihood) is then: $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$, where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates difference between estimated and ground truth UV values For details, see: N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 """ def __init__(self, sigma_lower_bound: 'float'): super(IIDIsotropicGaussianUVLossNew, self).__init__() self.sigma_lower_bound = sigma_lower_bound self.log2pi = math.log(2 * math.pi) def forward(self, input_0, input_1, input_2, input_3, input_4): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 arg4_1 = input_4 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return output[0]
AbirKhan96/facebook-detectron2
IIDIsotropicGaussianUVLoss
false
16,861
[ "Apache-2.0" ]
5
6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87
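The IIDIsotropicGaussianUVLoss kernel bakes the module's constants in: softplus with its standard threshold (x > 20 falls back to x), a sigma lower bound of 4.0 (the kernel's tmp6), and log(2*pi) = 1.8378770664093453. A minimal eager sketch under those assumptions, following the argument order the graph comments give (raw sigma, then u/u', then v/v'):

import math
import torch
import torch.nn.functional as F

def iid_isotropic_uv_loss(sigma_raw, u, target_u, v, target_v,
                          sigma_lower_bound=4.0):
    sigma2 = F.softplus(sigma_raw) + sigma_lower_bound
    delta2 = (u - target_u) ** 2 + (v - target_v) ** 2
    # 0.5 * (log(2*pi) + 2*log(sigma2) + ||delta||^2 / sigma2), summed
    nll = 0.5 * (math.log(2 * math.pi) + 2 * torch.log(sigma2) + delta2 / sigma2)
    return nll.sum()

args = [torch.rand(4, 4, 4, 4) for _ in range(5)]
loss = iid_isotropic_uv_loss(*args)
assert loss.ndim == 0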
SelfAttentionPooling
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/xj/cxjhy4b2emwxu6foyq65uamx25utrr6yabi4u4clztnbed2dydcn.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_0 = async_compile.triton('triton_poi_fused__softmax_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/no/cnoe2ucpkq6j5eael53gjc3t3cs46utcvmwkmw73j6lruo6fspxe.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_1 = async_compile.triton('triton_poi_fused_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = (xindex // 64) x5 = (xindex // 4) % 16 x2 = (xindex // 16) % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + (x7), tmp42, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] stream0 = get_raw_stream(0) 
triton_poi_fused__softmax_add_0.run(primals_4, buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_1.run(primals_1, primals_4, buf1, buf2, buf3, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 return (buf4, primals_1, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, 
eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_add_0[grid(64)](primals_4, buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_1[grid(256)](primals_1, primals_4, buf1, buf2, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 return buf4, primals_1, primals_4, buf1 class SelfAttentionPoolingNew(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPoolingNew, self).__init__() self.W = nn.Linear(input_dim, 1) self.softmax = nn.functional.softmax def forward(self, input_0, input_1): primals_2 = self.W.weight primals_3 = self.W.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
sumanthd17/s3prl
SelfAttentionPooling
false
12,999
[ "MIT" ]
0
bb74c705295d121c4308ceb6b6a2c8d1814d6f4c
https://github.com/sumanthd17/s3prl/tree/bb74c705295d121c4308ceb6b6a2c8d1814d6f4c
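SelfAttentionPooling compiles into the classic two-pass softmax: triton_poi_fused__softmax_add_0 produces the per-row max and exp-sum, and triton_poi_fused_mul_sum_1 normalizes and takes the weighted sum in one pass. A hedged reconstruction of the eager module with conventional (N, T, H) shapes; the compiled trace instead drives it with 4D (4, 4, 4, 4) tensors, which broadcast through the same ops:

import torch
from torch import nn

class SelfAttentionPoolingEager(nn.Module):
    def __init__(self, input_dim):
        super().__init__()
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep, att_logits_in):
        # att_logits_in carries additive logits (e.g. padding masks).
        att_logits = att_logits_in + self.W(batch_rep).squeeze(-1)
        att_w = torch.softmax(att_logits, dim=-1).unsqueeze(-1)
        # Softmax-weighted sum over the time axis gives the utterance rep.
        return torch.sum(batch_rep * att_w, dim=1)

pool = SelfAttentionPoolingEager(4)
rep, logits = torch.rand(4, 4, 4), torch.zeros(4, 4)
assert pool(rep, logits).shape == (4, 4)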
ManifoldPropagation
import torch import torch.nn as nn import torch.nn.functional as F def shift(x, direction, amount): if direction == 'left': ret = F.pad(x, (amount, 0, 0, 0, 0, 0, 0, 0))[:, :, :, :-amount] elif direction == 'right': ret = F.pad(x, (0, amount, 0, 0, 0, 0, 0, 0))[:, :, :, amount:] elif direction == 'top': ret = F.pad(x, (0, 0, amount, 0, 0, 0, 0, 0))[:, :, :-amount, :] elif direction == 'bottom': ret = F.pad(x, (0, 0, 0, amount, 0, 0, 0, 0))[:, :, amount:, :] else: raise return ret class ManifoldPropagation(nn.Module): def __init__(self, ic, k_hop=3, stride=1): super().__init__() self.k = nn.Conv2d(ic, ic, kernel_size=1, padding=0) self.q = nn.Conv2d(ic, ic, kernel_size=1, padding=0) self.v = nn.Conv2d(ic, ic, kernel_size=1, padding=0) self.k_hop = k_hop self.stride = stride self.normalize = nn.Softmax(dim=1) self.aggregate = nn.Conv2d(ic, ic, kernel_size=1) def forward(self, x): k = self.k(x) q = self.q(x) v = self.v(x) _batch_size, _channel, _h, _w = x.shape xl = shift(k, 'left', self.stride) xr = shift(k, 'right', self.stride) xt = shift(k, 'top', self.stride) xb = shift(k, 'bottom', self.stride) l = (q * xl).sum(1, keepdim=True) r = (q * xr).sum(1, keepdim=True) t = (q * xt).sum(1, keepdim=True) b = (q * xb).sum(1, keepdim=True) m = torch.ones_like(l) A = self.normalize(torch.cat((l, r, t, b, m), dim=1)) l = A[:, 0:1] r = A[:, 1:2] t = A[:, 2:3] b = A[:, 3:4] m = A[:, 4:5] for _ in range(self.k_hop): v = self.propagation(v, l, r, t, b, m) v = self.aggregate(v) return v def propagation(self, x, l, r, t, b, m): p = l * shift(x, 'right', self.stride) + r * shift(x, 'left', self. stride) + t * shift(x, 'bottom', self.stride) + b * shift(x, 'top', self.stride) + m * x return p def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ic': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x6 = xindex // 5 x2 = xindex // 20 % 4 x7 = xindex x5 = xindex // 4 % 5 x8 = xindex // 20 x9 = xindex % 20 tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-1 + x0 + 4 * x6), tmp2 & xmask, other=0.0) tmp4 = tl.load(in_ptr1 + x2, tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp2, tmp5, tmp6) tmp8 = x0 tmp9 = tl.full([1], 4, tl.int64) tmp10 = tmp8 < tmp9 tmp11 = tl.load(in_ptr0 + (x0 + 4 * x6), tmp10 & xmask, other=0.0) tmp12 = tl.load(in_ptr1 + x2, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tmp16 = -1 + x5 tmp17 = tmp16 >= tmp1 tmp18 = tl.load(in_ptr0 + (-4 + x9 + 16 * x8), tmp17 & xmask, other=0.0) tmp19 = tl.load(in_ptr1 + x2, tmp17 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tmp18 + tmp19 tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype) tmp22 = tl.where(tmp17, tmp20, tmp21) tmp23 = x5 tmp24 = tmp23 < tmp9 tmp25 = tl.load(in_ptr0 + (x9 + 16 * x8), tmp24 & xmask, other=0.0) tmp26 = tl.load(in_ptr1 + x2, tmp24 & xmask, eviction_policy= 'evict_last', other=0.0) tmp27 = tmp25 + tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tl.store(out_ptr0 + x7, tmp7, xmask) tl.store(out_ptr1 + x7, tmp15, xmask) tl.store(out_ptr2 + x7, tmp22, xmask) tl.store(out_ptr3 + x7, tmp29, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 5 x3 = xindex // 80 x4 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + 5 * x1 + 80 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 * tmp6 tmp8 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (20 + x0 + 5 * x1 + 80 * x3), 
tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 * tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.load(in_ptr0 + (32 + x4 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr1 + (40 + x0 + 5 * x1 + 80 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tmp12 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = tl.load(in_ptr0 + (48 + x4 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr1 + (60 + x0 + 5 * x1 + 80 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp16 * tmp17 tmp19 = tmp15 + tmp18 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp4, tmp19, tmp20) tmp22 = tmp0 >= tmp3 tmp23 = tl.full([1], 2, tl.int64) tmp24 = tmp0 < tmp23 tmp25 = tmp22 & tmp24 tmp26 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp27 = tl.load(in_ptr2 + (1 + x0 + 5 * x1 + 80 * x3), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp26 * tmp27 tmp29 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp30 = tl.load(in_ptr2 + (21 + x0 + 5 * x1 + 80 * x3), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (32 + x4 + 64 * x3), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tl.load(in_ptr2 + (41 + x0 + 5 * x1 + 80 * x3), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tmp37 = tl.load(in_ptr0 + (48 + x4 + 64 * x3), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr2 + (61 + x0 + 5 * x1 + 80 * x3), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tmp37 * tmp38 tmp40 = tmp36 + tmp39 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp25, tmp40, tmp41) tmp43 = tmp0 >= tmp23 tmp44 = tl.full([1], 3, tl.int64) tmp45 = tmp0 < tmp44 tmp46 = tmp43 & tmp45 tmp47 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tl.load(in_ptr3 + (x4 + 80 * x3), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp49 = tmp47 * tmp48 tmp50 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp51 = tl.load(in_ptr3 + (20 + x4 + 80 * x3), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp52 = tmp50 * tmp51 tmp53 = tmp49 + tmp52 tmp54 = tl.load(in_ptr0 + (32 + x4 + 64 * x3), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp55 = tl.load(in_ptr3 + (40 + x4 + 80 * x3), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tmp54 * tmp55 tmp57 = tmp53 + tmp56 tmp58 = tl.load(in_ptr0 + (48 + x4 + 64 * x3), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp59 = tl.load(in_ptr3 + (60 + x4 + 80 * x3), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tmp58 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tl.full(tmp61.shape, 0.0, tmp61.dtype) tmp63 = tl.where(tmp46, tmp61, tmp62) tmp64 = tmp0 >= tmp44 tmp65 = tl.full([1], 4, tl.int64) tmp66 = tmp0 < tmp65 tmp67 = tmp64 & tmp66 tmp68 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp67 & xmask, eviction_policy='evict_last', other=0.0) tmp69 = tl.load(in_ptr4 + (4 + x4 + 80 * x3), tmp67 & xmask, eviction_policy='evict_last', other=0.0) tmp70 = tmp68 * tmp69 tmp71 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp67 & xmask, eviction_policy='evict_last', other=0.0) tmp72 = tl.load(in_ptr4 + (24 + x4 + 80 * x3), tmp67 & xmask, eviction_policy='evict_last', 
other=0.0) tmp73 = tmp71 * tmp72 tmp74 = tmp70 + tmp73 tmp75 = tl.load(in_ptr0 + (32 + x4 + 64 * x3), tmp67 & xmask, eviction_policy='evict_last', other=0.0) tmp76 = tl.load(in_ptr4 + (44 + x4 + 80 * x3), tmp67 & xmask, eviction_policy='evict_last', other=0.0) tmp77 = tmp75 * tmp76 tmp78 = tmp74 + tmp77 tmp79 = tl.load(in_ptr0 + (48 + x4 + 64 * x3), tmp67 & xmask, eviction_policy='evict_last', other=0.0) tmp80 = tl.load(in_ptr4 + (64 + x4 + 80 * x3), tmp67 & xmask, eviction_policy='evict_last', other=0.0) tmp81 = tmp79 * tmp80 tmp82 = tmp78 + tmp81 tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype) tmp84 = tl.where(tmp67, tmp82, tmp83) tmp85 = tmp0 >= tmp65 tl.full([1], 5, tl.int64) tmp88 = 1.0 tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tl.where(tmp67, tmp84, tmp90) tmp92 = tl.where(tmp46, tmp63, tmp91) tmp93 = tl.where(tmp25, tmp42, tmp92) tmp94 = tl.where(tmp4, tmp21, tmp93) tl.store(out_ptr0 + x5, tmp94, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 80 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 80 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 80 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 80 * x1), xmask) tmp7 = tl.load(in_ptr0 + (64 + x0 + 80 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp0 - tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tmp1 - tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp8 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 80 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp3 / tmp4 tl.store(in_out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def 
triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 5 x2 = xindex // 20 x3 = xindex % 20 x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp3, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 5 x2 = xindex // 20 x3 = xindex % 20 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp3, xmask) @triton.jit def triton_poi_fused_add_mul_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x0 = xindex % 4 x5 = xindex // 4 x6 = xindex // 16 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (1 + x0 + 5 * x5), xmask) tmp3 = tl.load(in_ptr0 + (16 + x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr2 + (x0 + 5 * x5), xmask) tmp7 = tl.load(in_ptr0 + (32 + x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr3 + (4 + x4 + 20 * x6), xmask) tmp11 = tl.load(in_ptr0 + (48 + x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr4 + (x4 + 20 * x6), xmask) tmp15 = tl.load(in_ptr0 + (64 + x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr5 + x7, xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tl.store(out_ptr0 + x7, tmp18, xmask) @triton.jit def triton_poi_fused_add_mul_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x0 = xindex % 4 x5 = xindex // 4 x6 = xindex // 16 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (1 + x0 + 5 * x5), xmask) tmp3 = tl.load(in_ptr0 + (16 + x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr2 + (x0 + 5 * x5), xmask) tmp7 = tl.load(in_ptr0 + (32 + x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr3 + (4 + x4 + 20 * x6), xmask) tmp11 = tl.load(in_ptr0 + (48 + x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr4 + (x4 + 20 * x6), xmask) tmp15 = tl.load(in_ptr0 + (64 + x4 + 80 * x3), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_out_ptr0 + x7, xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tl.store(in_out_ptr0 + x7, tmp18, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) 
assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf2, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf3 = extern_kernels.convolution(primals_3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_0[grid(256)](buf4, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_1[grid(320)](buf0, primals_2, buf5, buf6, buf7, buf8, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf9 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) triton_poi_fused_cat_2[grid(320)](buf2, buf5, buf6, buf7, buf8, buf9, 320, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf9, buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = buf9 del buf9 triton_poi_fused__softmax_4[grid(320)](buf12, buf10, buf11, 320, XBLOCK=256, num_warps=4, num_stages=1) del buf10 del buf11 buf13 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_5[grid(320)](buf4, buf13, 320, XBLOCK=128, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_6[grid(320)](buf4, buf14, 320, XBLOCK=128, num_warps=4, num_stages=1) buf15 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32) triton_poi_fused_constant_pad_nd_7[grid(320)](buf4, buf15, 320, XBLOCK=256, num_warps=4, num_stages=1) buf16 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32) triton_poi_fused_constant_pad_nd_8[grid(320)](buf4, buf16, 320, XBLOCK=128, num_warps=4, num_stages=1) buf17 = buf0 del buf0 triton_poi_fused_add_mul_9[grid(256)](buf12, buf13, buf14, buf15, buf16, buf4, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_5[grid(320)](buf17, buf18, 320, XBLOCK=128, num_warps=4, num_stages=1) buf19 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) 
        triton_poi_fused_constant_pad_nd_6[grid(320)](buf17, buf19, 320,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf20 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
        triton_poi_fused_constant_pad_nd_7[grid(320)](buf17, buf20, 320,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf21 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
        triton_poi_fused_constant_pad_nd_8[grid(320)](buf17, buf21, 320,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf22 = buf17
        del buf17
        triton_poi_fused_add_mul_10[grid(256)](buf22, buf12, buf18, buf19,
            buf20, buf21, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf23 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        triton_poi_fused_constant_pad_nd_5[grid(320)](buf22, buf23, 320,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf24 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        triton_poi_fused_constant_pad_nd_6[grid(320)](buf22, buf24, 320,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf25 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
        triton_poi_fused_constant_pad_nd_7[grid(320)](buf22, buf25, 320,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf26 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
        triton_poi_fused_constant_pad_nd_8[grid(320)](buf22, buf26, 320,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf27 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_9[grid(256)](buf12, buf23, buf24, buf25,
            buf26, buf22, buf27, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf28 = extern_kernels.convolution(buf27, primals_8, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 4, 4, 4), (64, 16, 4, 1))
        buf29 = buf28
        del buf28
        triton_poi_fused_convolution_0[grid(256)](buf29, primals_9, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
    return (buf29, primals_1, primals_3, primals_4, primals_6, primals_8,
        buf2, buf4, buf5, buf6, buf7, buf8, buf12, buf13, buf14, buf15,
        buf16, buf18, buf19, buf20, buf21, buf22, buf23, buf24, buf25,
        buf26, buf27)


def shift(x, direction, amount):
    if direction == 'left':
        ret = F.pad(x, (amount, 0, 0, 0, 0, 0, 0, 0))[:, :, :, :-amount]
    elif direction == 'right':
        ret = F.pad(x, (0, amount, 0, 0, 0, 0, 0, 0))[:, :, :, amount:]
    elif direction == 'top':
        ret = F.pad(x, (0, 0, amount, 0, 0, 0, 0, 0))[:, :, :-amount, :]
    elif direction == 'bottom':
        ret = F.pad(x, (0, 0, 0, amount, 0, 0, 0, 0))[:, :, amount:, :]
    else:
        raise ValueError(f'unknown shift direction: {direction}')
    return ret


class ManifoldPropagationNew(nn.Module):

    def __init__(self, ic, k_hop=3, stride=1):
        super().__init__()
        self.k = nn.Conv2d(ic, ic, kernel_size=1, padding=0)
        self.q = nn.Conv2d(ic, ic, kernel_size=1, padding=0)
        self.v = nn.Conv2d(ic, ic, kernel_size=1, padding=0)
        self.k_hop = k_hop
        self.stride = stride
        self.normalize = nn.Softmax(dim=1)
        self.aggregate = nn.Conv2d(ic, ic, kernel_size=1)

    def propagation(self, x, l, r, t, b, m):
        p = (l * shift(x, 'right', self.stride) + r * shift(x, 'left',
            self.stride) + t * shift(x, 'bottom', self.stride) + b *
            shift(x, 'top', self.stride) + m * x)
        return p

    def forward(self, input_0):
        primals_1 = self.k.weight
        primals_2 = self.k.bias
        primals_4 = self.q.weight
        primals_5 = self.q.bias
        primals_6 = self.v.weight
        primals_7 = self.v.bias
        primals_8 = self.aggregate.weight
        primals_9 = self.aggregate.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
wonkyunglee/MPNet
ManifoldPropagation
false
16,758
[ "MIT" ]
1,280
3a6821a88a5e3db5bd97121761dbb361d9518bc2
https://github.com/wonkyunglee/MPNet/tree/3a6821a88a5e3db5bd97121761dbb361d9518bc2
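Two properties of this entry are easy to check in eager mode. First, shift() zero-pads before cropping, so shifting 'left' and then 'right' by one reproduces the input except for the last column, which the padding zeroes. Second, the compiled call() unrolls exactly three propagation hops, so ManifoldPropagationNew only matches the eager module at the default k_hop=3. A sketch, assuming the classes above are in scope and a CUDA device is available:

import torch

# shift round-trip property (runs on CPU).
x = torch.arange(16.0).reshape(1, 1, 4, 4)
roundtrip = shift(shift(x, 'left', 1), 'right', 1)
expected = x.clone()
expected[:, :, :, -1] = 0  # column zeroed by the pad
assert torch.equal(roundtrip, expected)

# eager vs. compiled forward at the default k_hop=3.
eager = ManifoldPropagation(ic=4).cuda().eval()
compiled = ManifoldPropagationNew(ic=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())
inp = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(inp), compiled(inp), rtol=1e-4, atol=1e-4)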
L2loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jh/cjhmorbdlv6z74opl7hgkfvksiemvvmsjf5pztsmoepd5k6tw7r4.py # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div] # Source node to ATen node mapping: # loss => div # pow_1 => pow_1 # sub => sub # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {}) triton_per_fused_div_pow_sub_sum_0 = async_compile.triton('triton_per_fused_div_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L2lossNew(torch.nn.Module): def __init__(self): super(L2lossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
btolooshams/densae
L2loss
false
6,365
[ "MIT" ]
1
a1e4c4cc1b4be0386d42136f2695615ea3cf4815
https://github.com/btolooshams/densae/tree/a1e4c4cc1b4be0386d42136f2695615ea3cf4815
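The single persistent-reduction kernel computes sum((a - b)**2) over all 256 elements and scales by 0.25, i.e. the division by 4 recorded in the graph fragment (%div). A restatement in eager PyTorch, assuming L2lossNew from this entry is in scope and a CUDA device is available:

import torch

def l2_reference(a, b):
    # Same math as triton_per_fused_div_pow_sub_sum_0:
    # sum of squared differences, divided by 4.
    return ((a - b) ** 2).sum() / 4

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(L2lossNew()(a, b), l2_reference(a, b))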
Mlp
import torch import torch.nn as nn class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 class MlpNew(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ChangeTheWorld20191008/SwinIR
Mlp
false
11,293
[ "Apache-2.0" ]
0
a0cf7330b10e7c7294f11f59e1b89eff973b9093
https://github.com/ChangeTheWorld20191008/SwinIR/tree/a0cf7330b10e7c7294f11f59e1b89eff973b9093
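triton_poi_fused_gelu_0 evaluates the exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2))); the constant 0.7071067811865476 is 1/sqrt(2). A small check that this formula agrees with PyTorch's default GELU (runs on CPU, no assumptions beyond torch itself):

import torch

def gelu_reference(x):
    # The formula the fused kernel evaluates elementwise.
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))

x = torch.randn(256)
torch.testing.assert_close(gelu_reference(x), torch.nn.functional.gelu(x))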
MaxPool1d
import torch import torch.nn as nn class MaxPool1d(nn.Module): def __init__(self, win=2, stride=None, pad=0): super().__init__() self.pooling = nn.MaxPool1d(kernel_size=win, stride=stride, padding=pad ) def forward(self, x): """ Args: x: shape=(batch_size, max_seq_len, dim) """ x = x.permute(0, 2, 1) x = self.pooling(x).permute(0, 2, 1) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 2), (8, 1, 32, 4), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(32)](arg0_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 2, 4), (8, 4, 1), 0), class MaxPool1dNew(nn.Module): def __init__(self, win=2, stride=None, pad=0): super().__init__() self.pooling = nn.MaxPool1d(kernel_size=win, stride=stride, padding=pad ) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
WiseDoge/Text-Classification-PyTorch
MaxPool1d
false
18,091
[ "MIT" ]
6
9371eeed6bd7ecf1d529c8f2a6c997fcde67a559
https://github.com/WiseDoge/Text-Classification-PyTorch/tree/9371eeed6bd7ecf1d529c8f2a6c997fcde67a559
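The one fused kernel reads the same feature at two consecutive sequence positions (offsets 8 * x1 and 4 + 8 * x1) and keeps the maximum, which is exactly the permute -> MaxPool1d(kernel_size=2) -> permute pipeline of the reference module. An equivalence sketch, assuming both classes from this entry are in scope and a CUDA device is available:

import torch

x = torch.rand(4, 4, 4, device='cuda')  # (batch, seq, dim)
eager = MaxPool1d()        # reference module above
compiled = MaxPool1dNew()  # Triton-backed module
torch.testing.assert_close(eager(x), compiled(x))  # both yield (4, 2, 4)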
SmoothL1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/c5/cc5asg4nd3jaxgpbn6ptpyi6axgsaygnv5pqlnptuo35p2hhtygb.py # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.smooth_l1_loss, aten.mul] # Source node to ATen node mapping: # loss => abs_1, div, lt, mean, mul, pow_1, sub, sub_1, where # mul => mul_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_mul_smooth_l1_loss_0 = async_compile.triton('triton_per_fused_mul_smooth_l1_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 
1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_smooth_l1_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = tmp16 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.smooth_l1_loss, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mul_smooth_l1_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = tmp16 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mul_smooth_l1_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SmoothL1LossNew(nn.Module): """SmoothL1Loss loss . Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.smooth_l1_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ZephyrII/mmpose_charger
SmoothL1Loss
false
12,012
[ "Apache-2.0" ]
0
ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
https://github.com/ZephyrII/mmpose_charger/tree/ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
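Per element, the kernel computes d = |pred - target| and selects 0.5 * d**2 (the beta = 1.0 branch) where d < 1 and d - 0.5 otherwise, then takes the mean and multiplies by the loss weight 1.0. The same arithmetic in eager PyTorch, checked against F.smooth_l1_loss (CPU is fine here):

import torch
import torch.nn.functional as F

def smooth_l1_reference(pred, target, loss_weight=1.0):
    # Mirrors triton_per_fused_mul_smooth_l1_loss_0.
    d = (pred - target).abs()
    per_elem = torch.where(d < 1.0, 0.5 * d * d, d - 0.5)
    return per_elem.mean() * loss_weight

a = torch.rand(4, 4, 4, 4)
b = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(smooth_l1_reference(a, b), F.smooth_l1_loss(a, b))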
resnet_block
import torch import torch.nn as nn import torch.nn.functional as F class resnet_block(nn.Module): def __init__(self, dim_in, dim_out): super(resnet_block, self).__init__() self.dim_in = dim_in self.dim_out = dim_out if self.dim_in == self.dim_out: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1, padding=1, bias=False) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) nn.init.xavier_uniform_(self.conv_1.weight) nn.init.xavier_uniform_(self.conv_2.weight) else: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2, padding=1, bias=False) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2, padding=0, bias=False) nn.init.xavier_uniform_(self.conv_1.weight) nn.init.xavier_uniform_(self.conv_2.weight) nn.init.xavier_uniform_(self.conv_s.weight) def forward(self, input, is_training=False): if self.dim_in == self.dim_out: output = self.conv_1(input) output = F.leaky_relu(output, negative_slope=0.01, inplace=True) output = self.conv_2(output) output = output + input output = F.leaky_relu(output, negative_slope=0.01, inplace=True) else: output = self.conv_1(input) output = F.leaky_relu(output, negative_slope=0.01, inplace=True) output = self.conv_2(output) input_ = self.conv_s(input) output = output + input_ output = F.leaky_relu(output, negative_slope=0.01, inplace=True) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_1[grid(256)](buf3, primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, primals_3, buf1, buf4 class resnet_blockNew(nn.Module): def __init__(self, dim_in, dim_out): super(resnet_blockNew, self).__init__() self.dim_in = dim_in self.dim_out = dim_out if self.dim_in == self.dim_out: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1, padding=1, bias=False) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) nn.init.xavier_uniform_(self.conv_1.weight) nn.init.xavier_uniform_(self.conv_2.weight) else: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2, padding=1, bias=False) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2, padding=0, bias=False) nn.init.xavier_uniform_(self.conv_1.weight) nn.init.xavier_uniform_(self.conv_2.weight) nn.init.xavier_uniform_(self.conv_s.weight) def forward(self, input_0): primals_1 = self.conv_1.weight primals_3 = self.conv_2.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
luixiao1223/BSP-NET-pytorch
resnet_block
false
3,940
[ "MIT" ]
0
f871c8ce6a9d52ac922e110702c47cd1c89d0a73
https://github.com/luixiao1223/BSP-NET-pytorch/tree/f871c8ce6a9d52ac922e110702c47cd1c89d0a73
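The second fused kernel performs the residual add and leaky ReLU in one pass and additionally stores a positivity mask (buf4) for the leaky_relu backward, so the extra output does not affect the forward result. A forward-equivalence sketch for the dim_in == dim_out branch, assuming both classes from this entry are in scope and a CUDA device is available:

import torch

eager = resnet_block(dim_in=4, dim_out=4).cuda().eval()
compiled = resnet_blockNew(dim_in=4, dim_out=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # conv_1 and conv_2 weights
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-5, atol=1e-5)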
IoULoss
import torch import torch.nn as nn import torch.nn.functional as F class IoULoss(nn.Module): """ Creates a criterion that computes the Intersection over Union (IoU) between a segmentation mask and its ground truth. Rahman, M.A. and Wang, Y: Optimizing Intersection-Over-Union in Deep Neural Networks for Image Segmentation. International Symposium on Visual Computing (2016) http://www.cs.umanitoba.ca/~ywang/papers/isvc16.pdf """ def __init__(self, size_average=True): super().__init__() self.size_average = size_average def forward(self, input, target): input = F.sigmoid(input) intersection = (input * target).sum() union = (input + target - input * target).sum() iou = intersection / union iou_dual = input.size(0) - iou if self.size_average: iou_dual = iou_dual / input.size(0) return iou_dual def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sigmoid_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = tmp1 + tmp2 tmp8 = tmp7 - tmp3 tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = tmp6 / tmp11 tmp13 = 4.0 tmp14 = tmp13 - tmp12 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sigmoid_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class IoULossNew(nn.Module): """ Creates a criterion that computes the Intersection over Union (IoU) between a segmentation mask and its ground truth. Rahman, M.A. and Wang, Y: Optimizing Intersection-Over-Union in Deep Neural Networks for Image Segmentation. International Symposium on Visual Computing (2016) http://www.cs.umanitoba.ca/~ywang/papers/isvc16.pdf """ def __init__(self, size_average=True): super().__init__() self.size_average = size_average def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
BCV-Uniandes/DMS
IoULoss
false
13,350
[ "MIT" ]
66
9fa3a3a2ef5980dd17e21b73234a4cd0b3d00e16
https://github.com/BCV-Uniandes/DMS/tree/9fa3a3a2ef5980dd17e21b73234a4cd0b3d00e16
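The whole loss collapses into one reduction kernel: s = sigmoid(input), intersection = sum(s * t), union = sum(s + t - s * t), then (4 - iou) * 0.25, where 4 is input.size(0) baked in at trace time and 0.25 is the size_average division. The same computation in eager PyTorch, assuming IoULossNew is in scope and a CUDA device is available:

import torch

def iou_dual_reference(logits, target):
    # Eager restatement of triton_per_fused_add_div_mul_rsub_sigmoid_sub_sum_0.
    s = torch.sigmoid(logits)
    iou = (s * target).sum() / (s + target - s * target).sum()
    return (logits.size(0) - iou) / logits.size(0)  # size_average=True

a = torch.rand(4, 4, 4, 4, device='cuda')
t = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(IoULossNew()(a, t), iou_dual_reference(a, t))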
ContextgenCNN
import torch import torch.nn as nn import torch.nn.functional as F class ContextgenCNN(nn.Module): def __init__(self, latent_dim=1024): super(ContextgenCNN, self).__init__() self.latent_dim = latent_dim self.conv1 = nn.Conv2d(self.latent_dim, self.latent_dim // 4, 1, stride=1, padding=0, bias=False) self.conv2 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 4, 2, stride=1, padding=1, bias=False) self.conv3 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 2, 2, stride=1, padding=0, bias=False) self.conv4 = nn.Conv2d(self.latent_dim // 2, self.latent_dim, 1, stride=1, padding=0, bias=False) def forward(self, x): x = F.elu(self.conv1(x)) x = F.elu(self.conv2(x)) x = F.elu(self.conv3(x)) x = F.elu(self.conv4(x)) return x def train_order_block_ids(self): return [[0, 3]] def get_inputs(): return [torch.rand([4, 1024, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = yindex // 1024 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None) tl.store(out_ptr0 + (y0 + 1024 * x2 + 4194304 * y1), tmp0, None) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_elu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4326400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_elu_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_6(in_ptr0, out_ptr0, 
ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 y1 = yindex // 4096 tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y3), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (y0 + 4096 * x2 + 4194304 * y1), tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_2, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_3, (256, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_4, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_5, (1024, 512, 1, 1), (512, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536, 1024), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(4096, 4096)](primals_2, buf0, 4096, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((256, 256, 2, 2), (1024, 1, 512, 256), torch.float32) triton_poi_fused_1[grid(65536, 4)](primals_3, buf1, 65536, 4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((512, 256, 2, 2), (1024, 1, 512, 256), torch.float32) triton_poi_fused_2[grid(131072, 4)](primals_4, buf2, 131072, 4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf4 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256 ), torch.float32) triton_poi_fused_elu_3[grid(4194304)](buf3, buf4, 4194304, XBLOCK= 1024, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 65, 65), (1081600, 1, 16640, 256)) buf6 = empty_strided_cuda((4, 256, 65, 65), (1081600, 1, 16640, 256 ), torch.float32) triton_poi_fused_elu_4[grid(4326400)](buf5, buf6, 4326400, XBLOCK= 1024, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf8 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) triton_poi_fused_elu_5[grid(8388608)](buf7, buf8, 8388608, XBLOCK= 1024, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf8, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 1024, 64, 64), (4194304, 1, 65536, 1024)) buf10 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 4096, 64, 1 ), torch.float32) triton_poi_fused_elu_6[grid(16384, 1024)](buf9, buf10, 16384, 1024, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) return (buf10, primals_1, buf0, buf1, buf2, primals_5, buf3, buf4, buf5, buf6, buf7, buf8, 
buf9) class ContextgenCNNNew(nn.Module): def __init__(self, latent_dim=1024): super(ContextgenCNNNew, self).__init__() self.latent_dim = latent_dim self.conv1 = nn.Conv2d(self.latent_dim, self.latent_dim // 4, 1, stride=1, padding=0, bias=False) self.conv2 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 4, 2, stride=1, padding=1, bias=False) self.conv3 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 2, 2, stride=1, padding=0, bias=False) self.conv4 = nn.Conv2d(self.latent_dim // 2, self.latent_dim, 1, stride=1, padding=0, bias=False) def train_order_block_ids(self): return [[0, 3]] def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv3.weight primals_5 = self.conv4.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
SarodYatawatta/federated-pytorch-test
ContextgenCNN
false
8,765
[ "Apache-2.0" ]
33
42a51ba12a92b32fa19273340d5b61e74e11d8e0
https://github.com/SarodYatawatta/federated-pytorch-test/tree/42a51ba12a92b32fa19273340d5b61e74e11d8e0
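Reading note: the optimized call() above fuses an ELU after each of the four convolutions (each triton_poi_fused_elu_* kernel computes where(x > 0, x, expm1(x)), i.e. ELU with alpha=1). A minimal eager-mode sketch of the same forward pass, with hypothetical stand-in layers sized from the __init__ shown above:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv1 = nn.Conv2d(1024, 256, 1, bias=False)             # latent_dim -> latent_dim // 4
conv2 = nn.Conv2d(256, 256, 2, padding=1, bias=False)
conv3 = nn.Conv2d(256, 512, 2, bias=False)
conv4 = nn.Conv2d(512, 1024, 1, bias=False)

x = torch.rand(4, 1024, 64, 64)    # shape from the assert_size_stride guard on primals_2
x = F.elu(conv1(x))                # -> (4, 256, 64, 64)
x = F.elu(conv2(x))                # kernel 2 with padding 1 -> (4, 256, 65, 65)
x = F.elu(conv3(x))                # kernel 2, no padding    -> (4, 512, 64, 64)
x = F.elu(conv4(x))                # 1x1 conv                -> (4, 1024, 64, 64)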
FCLayer
import torch import torch.nn as nn class FCLayer(nn.Module): def __init__(self, input_dim, output_dim, dropout_rate=0.0, use_activation=True): super(FCLayer, self).__init__() self.use_activation = use_activation self.dropout = nn.Dropout(dropout_rate) self.linear = nn.Linear(input_dim, output_dim) self.tanh = nn.Tanh() def forward(self, x): x = self.dropout(x) if self.use_activation: x = self.tanh(x) return self.linear(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class FCLayerNew(nn.Module): def __init__(self, input_dim, output_dim, dropout_rate=0.0, use_activation=True): super(FCLayerNew, self).__init__() self.use_activation = use_activation self.dropout = nn.Dropout(dropout_rate) self.linear = nn.Linear(input_dim, output_dim) self.tanh = nn.Tanh() def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
StevenChaoo/R-BERT-DDI
FCLayer
false
1,091
[ "MIT" ]
0
6d9666e0bc61397ca942ffad53653690c1e8a899
https://github.com/StevenChaoo/R-BERT-DDI/tree/6d9666e0bc61397ca942ffad53653690c1e8a899
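The operator order in FCLayer is worth noting: dropout first, then the optional tanh, then the linear map, which is why the compiled call() runs triton_poi_fused_tanh_0 before the extern addmm. A small usage sketch (shapes follow get_inputs()/get_init_inputs() above; FCLayer is the class defined in this record):

import torch

layer = FCLayer(input_dim=4, output_dim=4, dropout_rate=0.0)
x = torch.rand(4, 4, 4, 4)
y = layer(x)         # dropout is the identity at rate 0.0, matching the compiled graph
print(y.shape)       # torch.Size([4, 4, 4, 4]); nn.Linear acts on the last dimension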
CumulativeLinkLoss
import torch import numpy as np from torch import nn from typing import Optional import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed def _reduction(loss: 'torch.Tensor', reduction: 'str') ->torch.Tensor: """ Reduce loss Parameters ---------- loss : torch.Tensor, [batch_size, num_classes] Batch losses. reduction : str Method for reducing the loss. Options include 'elementwise_mean', 'none', and 'sum'. Returns ------- loss : torch.Tensor Reduced loss. """ if reduction == 'elementwise_mean': return loss.mean() elif reduction == 'none': return loss elif reduction == 'sum': return loss.sum() else: raise ValueError(f'{reduction} is not a valid reduction') def cumulative_link_loss(y_pred: 'torch.Tensor', y_true: 'torch.Tensor', reduction: 'str'='elementwise_mean', class_weights: 'Optional[np.ndarray]'=None) ->torch.Tensor: """ Calculates the negative log likelihood using the logistic cumulative link function. See "On the consistency of ordinal regression methods", Pedregosa et. al. for more details. While this paper is not the first to introduce this, it is the only one that I could find that was easily readable outside of paywalls. Parameters ---------- y_pred : torch.Tensor, [batch_size, num_classes] Predicted target class probabilities. float dtype. y_true : torch.Tensor, [batch_size, 1] True target classes. long dtype. reduction : str Method for reducing the loss. Options include 'elementwise_mean', 'none', and 'sum'. class_weights : np.ndarray, [num_classes] optional (default=None) An array of weights for each class. If included, then for each sample, look up the true class and multiply that sample's loss by the weight in this array. Returns ------- loss: torch.Tensor """ eps = 1e-15 likelihoods = torch.clamp(torch.gather(y_pred, 1, y_true.unsqueeze(1)), eps, 1 - eps) neg_log_likelihood = -torch.log(likelihoods) if class_weights is not None: class_weights = torch.as_tensor(class_weights, dtype= neg_log_likelihood.dtype, device=neg_log_likelihood.device) neg_log_likelihood *= class_weights[y_true] loss = _reduction(neg_log_likelihood, reduction) return loss class CumulativeLinkLoss(nn.Module): """ Module form of cumulative_link_loss() loss function Parameters ---------- reduction : str Method for reducing the loss. Options include 'elementwise_mean', 'none', and 'sum'. class_weights : np.ndarray, [num_classes] optional (default=None) An array of weights for each class. If included, then for each sample, look up the true class and multiply that sample's loss by the weight in this array. """ def __init__(self, reduction: 'str'='elementwise_mean', class_weights: 'Optional[torch.Tensor]'=None) ->None: super().__init__() self.class_weights = class_weights self.reduction = reduction def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor' ) ->torch.Tensor: return cumulative_link_loss(y_pred, y_true, reduction=self. reduction, class_weights=self.class_weights) def get_inputs(): return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4], dtype= torch.int64)] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn from typing import Optional import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_clamp_gather_log_mean_neg_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = tmp6.to(tl.float32) tmp8 = 1e-15 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = 0.999999999999999 tmp11 = triton_helpers.minimum(tmp9, tmp10) tmp12 = tmp11.to(tl.int64) tmp13 = tmp12.to(tl.float32) tmp14 = tl_math.log(tmp13) tmp15 = -tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 4.0 tmp20 = tmp18 / tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_clamp_gather_log_mean_neg_0[grid(1)](buf1, arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def _reduction(loss: 'torch.Tensor', reduction: 'str') ->torch.Tensor: """ Reduce loss Parameters ---------- loss : torch.Tensor, [batch_size, num_classes] Batch losses. reduction : str Method for reducing the loss. Options include 'elementwise_mean', 'none', and 'sum'. Returns ------- loss : torch.Tensor Reduced loss. """ if reduction == 'elementwise_mean': return loss.mean() elif reduction == 'none': return loss elif reduction == 'sum': return loss.sum() else: raise ValueError(f'{reduction} is not a valid reduction') def cumulative_link_loss(y_pred: 'torch.Tensor', y_true: 'torch.Tensor', reduction: 'str'='elementwise_mean', class_weights: 'Optional[np.ndarray]'=None) ->torch.Tensor: """ Calculates the negative log likelihood using the logistic cumulative link function. See "On the consistency of ordinal regression methods", Pedregosa et. al. for more details. While this paper is not the first to introduce this, it is the only one that I could find that was easily readable outside of paywalls. Parameters ---------- y_pred : torch.Tensor, [batch_size, num_classes] Predicted target class probabilities. float dtype. y_true : torch.Tensor, [batch_size, 1] True target classes. long dtype. reduction : str Method for reducing the loss. Options include 'elementwise_mean', 'none', and 'sum'. 
class_weights : np.ndarray, [num_classes] optional (default=None) An array of weights for each class. If included, then for each sample, look up the true class and multiply that sample's loss by the weight in this array. Returns ------- loss: torch.Tensor """ eps = 1e-15 likelihoods = torch.clamp(torch.gather(y_pred, 1, y_true.unsqueeze(1)), eps, 1 - eps) neg_log_likelihood = -torch.log(likelihoods) if class_weights is not None: class_weights = torch.as_tensor(class_weights, dtype= neg_log_likelihood.dtype, device=neg_log_likelihood.device) neg_log_likelihood *= class_weights[y_true] loss = _reduction(neg_log_likelihood, reduction) return loss class CumulativeLinkLossNew(nn.Module): """ Module form of cumulative_link_loss() loss function Parameters ---------- reduction : str Method for reducing the loss. Options include 'elementwise_mean', 'none', and 'sum'. class_weights : np.ndarray, [num_classes] optional (default=None) An array of weights for each class. If included, then for each sample, look up the true class and multiply that sample's loss by the weight in this array. """ def __init__(self, reduction: 'str'='elementwise_mean', class_weights: 'Optional[torch.Tensor]'=None) ->None: super().__init__() self.class_weights = class_weights self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Ramstein/Retinopathy2
CumulativeLinkLoss
false
972
[ "MIT" ]
0
669e74206c466e6351d4e3df6087c6aa39b5c6c2
https://github.com/Ramstein/Retinopathy2/tree/669e74206c466e6351d4e3df6087c6aa39b5c6c2
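The fused kernel gathers each sample's predicted probability at its true class, clamps it to [1e-15, 1 - 1e-15], and averages the negative logs; the extra to(tl.int64) round-trip appears only because get_inputs() feeds integer tensors. A short eager check of the function itself, with float predictions as the docstring intends (cumulative_link_loss is defined in this record):

import torch

y_pred = torch.tensor([[0.7, 0.2, 0.1],
                       [0.1, 0.8, 0.1]])
y_true = torch.tensor([0, 1])
loss = cumulative_link_loss(y_pred, y_true)
# gather picks 0.7 and 0.8, so loss == (-log(0.7) - log(0.8)) / 2
print(loss)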
SplitAndConcat
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/dm/cdm7gguqbidi2lqrahwmmne3zoqbx3zistzz33duvcyyqdkluky6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_1],), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 32) x0 = xindex % 32 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x1)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = 
tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*((-4) + x1))), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 2, 4, 4), (32, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 32 x0 = xindex % 32 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * (-4 + x1)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SplitAndConcatNew(nn.Module): """Split the data from split_dim and concatenate in concat_dim. @param split_dim from which axis the data will be chunk @param concat_dim to which axis the data will be concatenated @param chunk size of the data to be chunk/concatenated """ def __init__(self, split_dim: 'int'=1, concat_dim: 'int'=0, chunk: 'int'=2 ): super(SplitAndConcatNew, self).__init__() self.split_dim = split_dim self.concat_dim = concat_dim self.chunk = chunk def extra_repr(self): return ( f'split_dim={self.split_dim}, concat_dim={self.concat_dim}, chunk={self.chunk}' ) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
tsubauaaa/d2go
SplitAndConcat
false
4,461
[ "Apache-2.0" ]
0
9f746159ebf78ce79f644c405ca8695bc29d1075
https://github.com/tsubauaaa/d2go/tree/9f746159ebf78ce79f644c405ca8695bc29d1075
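The single cat kernel performs the split-then-concat in one pass: for the default split_dim=1, concat_dim=0, chunk=2, the first half of the output batch takes channels [0:2) of each input item and the second half takes channels [2:4), exactly the index arithmetic in triton_poi_fused_cat_0. Eager equivalent, for reference:

import torch

x = torch.rand(4, 4, 4, 4)
out = torch.cat(torch.chunk(x, chunks=2, dim=1), dim=0)
print(out.shape)   # torch.Size([8, 2, 4, 4]), matching the buf0 allocation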
TensorExp
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/55/c554z2kehw4y75kmgw5gvu2rebcutcxsad4kke4bln6vvbihauog.py # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] # Source node to ATen node mapping: # exp => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_exp_0 = async_compile.triton('triton_poi_fused_exp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), 
(64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_exp_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class TensorExpNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Minyus/pipelinex
TensorExp
false
14,033
[ "Apache-2.0" ]
188
f35c524ec9c50751ee27d9a42d98317e16f1c544
https://github.com/Minyus/pipelinex/tree/f35c524ec9c50751ee27d9a42d98317e16f1c544
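This record is near-minimal: the whole module lowers to one elementwise kernel calling tl_math.exp. A quick parity sketch against eager torch.exp (the generated call() allocates CUDA buffers, so a GPU is assumed; TensorExpNew is the wrapper class in this record):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
out = TensorExpNew()(x)
print(torch.allclose(out, torch.exp(x)))   # True up to float32 rounding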
RpowInt
import torch class RpowInt(torch.nn.Module): def __init__(self): super(RpowInt, self).__init__() def forward(self, x): return 2 ** x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.exp2(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RpowIntNew(torch.nn.Module): def __init__(self): super(RpowIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Ilyabasharov/torch2trt
RpowInt
false
2,541
[ "MIT" ]
0
76bf298b3da408509665e23e2494922b131afb10
https://github.com/Ilyabasharov/torch2trt/tree/76bf298b3da408509665e23e2494922b131afb10
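Note the lowering choice: the reflected power 2 ** x becomes libdevice.exp2 rather than a generic pow, via the identity 2**x == exp2(x). The same identity in eager PyTorch:

import torch

x = torch.rand(4, 4, 4, 4)
print(torch.allclose(2 ** x, torch.exp2(x)))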
SDPAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py # Topologically Sorted Source Nodes: [sm_qk], Original ATen: [aten._softmax] # Source node to ATen node mapping: # sm_qk => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [sm_qk], Original ATen: [aten._softmax] # Source node to ATen node mapping: # sm_qk => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 
+ tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [qk], Original ATen: [aten.bmm] extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sm_qk], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sm_qk], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class SDPAttentionNew(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, dropout=0, causal=False): super(SDPAttentionNew, self).__init__() self.causal = causal self.dropout = nn.Dropout(dropout) self.mask_q = None self.mask_k = None def set_mask_q(self, masked_tq): self.mask_q = masked_tq def set_mask_k(self, masked_tk): self.mask_k = masked_tk def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], 
output[1]
jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch
SDPAttention
false
3,775
[ "MIT" ]
0
d27d2d390f0831330405c16bd29c7f331ad2007a
https://github.com/jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch/tree/d27d2d390f0831330405c16bd29c7f331ad2007a
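The two softmax kernels implement a numerically stable softmax over the scaled scores: kernel 0 subtracts the per-row max and multiplies by 0.5 before exponentiating (d_k = 4 here, so 1/sqrt(d_k) = 0.5), and kernel 1 divides by the row sum. An eager sketch of what call() returns for these shapes:

import math
import torch

q, k, v = (torch.rand(4, 4, 4) for _ in range(3))
scores = torch.bmm(q, k.transpose(1, 2)) / math.sqrt(q.size(-1))
attn = torch.softmax(scores, dim=-1)    # second return value (buf2)
out = torch.bmm(attn, v)                # first return value (buf3)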
CossimLoss
import torch import torch.nn as nn class CossimLoss(nn.Module): def __init__(self, dim: 'int'=1, eps: 'float'=1e-08): super().__init__() self.cos_sim = nn.CosineSimilarity(dim, eps) def forward(self, output, target): return -self.cos_sim(output, target).mean() + 1 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x3, xmask) tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + x3, tmp31, xmask) @triton.jit def triton_per_fused_add_mean_neg_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 64.0 tmp11 = tmp9 / tmp10 tmp12 = -tmp11 tmp13 = 1.0 tmp14 = tmp12 + tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)]( arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_mean_neg_sum_1[grid(1)](buf2, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) 
del buf0 return buf2, class CossimLossNew(nn.Module): def __init__(self, dim: 'int'=1, eps: 'float'=1e-08): super().__init__() self.cos_sim = nn.CosineSimilarity(dim, eps) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Geson-anko/VQ_AutoEncoder
CossimLoss
false
2,278
[ "MIT" ]
0
62e1694de38ea6f152891e19abc190ad4048e587
https://github.com/Geson-anko/VQ_AutoEncoder/tree/62e1694de38ea6f152891e19abc190ad4048e587
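Kernel 0 L2-normalizes both inputs along dim=1 (the four strided 16-element loads per tensor are the channel reduction, with the norm clamped at eps=1e-08) and multiplies them elementwise; kernel 1 sums over channels, takes the mean, negates, and adds 1. That is exactly the eager definition:

import torch
import torch.nn as nn

a = torch.rand(4, 4, 4, 4)
b = torch.rand(4, 4, 4, 4)
loss = -nn.CosineSimilarity(dim=1, eps=1e-08)(a, b).mean() + 1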
EuclideanLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/ie/cie26aqrhrjj27vnihr4jur2pvlo3x4acpibdw2jj7v6zbvkxes3.py # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, diff], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div] # Source node to ATen node mapping: # diff => div # pow_1 => pow_1 # sub => sub # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 8), kwargs = {}) triton_per_fused_div_pow_sub_sum_0 = async_compile.triton('triton_per_fused_div_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 0.125 tmp8 = tmp6 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, diff], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 0.125 tmp8 = tmp6 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class EuclideanLossNew(nn.Module): def __init__(self): super(EuclideanLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
IndigoPurple/EFENet
EuclideanLoss
false
8,290
[ "MIT" ]
11
e88234486f19534274a0a20badc251788ac67e31
https://github.com/IndigoPurple/EFENet/tree/e88234486f19534274a0a20badc251788ac67e31
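The lone reduction kernel computes sum((x - y)**2) * 0.125 over all 256 elements; the graph fragment shows the divisor 8 is baked in at trace time (plausibly 2 * batch_size for these inputs, though the record does not include the eager module body, so that reading is an assumption). As a plain restatement:

import torch

x = torch.rand(4, 4, 4, 4)
y = torch.rand(4, 4, 4, 4)
loss = ((x - y) ** 2).sum() / 8   # the 0.125 multiply in triton_per_fused_div_pow_sub_sum_0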
nin
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = 
xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/37/c37esw52rucibb46dl26rfvuzbcbxbhcpsd7ramumzunyzagvgwq.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) triton_poi_fused__weight_norm_interface_1 = async_compile.triton('triton_poi_fused__weight_norm_interface_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__weight_norm_interface_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/jg/cjg527q7k5sloxuipk76c6qbvftmhbkafocncizwfc4enj3gepuu.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => div, mul # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {}) # 
%mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {}) triton_poi_fused__weight_norm_interface_2 = async_compile.triton('triton_poi_fused__weight_norm_interface_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__weight_norm_interface_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] triton_poi_fused__weight_norm_interface_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] triton_poi_fused__weight_norm_interface_2.run(primals_3, primals_2, buf1, buf2, 16, grid=grid(16), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, 
reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_4 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf2, primals_2, primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.utils import weight_norm as wn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__weight_norm_interface_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__weight_norm_interface_2[grid(16)](primals_3, primals_2, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_4 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 1, 
16, 4), 0 ), buf2, primals_2, primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1 class ninNew(nn.Module): def __init__(self, dim_in, dim_out): super(ninNew, self).__init__() self.lin_a = wn(nn.Linear(dim_in, dim_out)) self.dim_out = dim_out def forward(self, input_0): primals_4 = self.lin_a.bias primals_2 = self.lin_a.weight_g primals_3 = self.lin_a.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
elahekhodaie/PixelCnnPP
nin
false
10070
[ "MIT" ]
0
ab1e245ed8c24009364b1f891288eb1a526b0121
https://github.com/elahekhodaie/PixelCnnPP/tree/ab1e245ed8c24009364b1f891288eb1a526b0121
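A minimal smoke-test sketch for the nin record above (not part of the original dump): it assumes a CUDA device, the (4, 4, 4, 4) input shape pinned by the assert_size_stride guards, and the standard PixelCNN++ nin semantics of a weight-normed linear applied channel-last.

# Hypothetical usage sketch; ninNew is the compiled module defined above.
import torch

if torch.cuda.is_available():
    model = ninNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)  # fused path: permute-clone, weight-norm, addmm kernels
    # Eager reference: the same weight-normed linear over the channel dim.
    ref = model.lin_a(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
    assert torch.allclose(out, ref, atol=1e-5)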
ActorNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/cn/ccnvkf7kfnskbbfy2kwx55oghjftngamwdttghryrfs4g3fay72l.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ta/ctarkhd7gqmxsmyghlzwykngqas6dfclf7bk5ijucwm3q5stwfhi.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # out => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_7,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256), (256, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (256, 256), (256, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (4, 256), (256, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), 
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse buf10 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 16384, grid=grid(16384), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf2 # reuse buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf9, 16384, grid=grid(16384), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 256), (1, 256), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf4 # reuse buf8 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf8, 16384, grid=grid(16384), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (64, 256), (256, 1), 0), reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf7, primals_9, 256, grid=grid(256), stream=stream0) del primals_9 return (buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(buf5, (64, 256), (256, 1), 0), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, 
primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256), (256, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 256), (256, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (4, 256), (256, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf10 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf10, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf2 buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3, primals_5, buf9, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 256), (1, 256), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf4 buf8 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf5, primals_7, buf8, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 256), (256, 1), 0), reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_sigmoid_1[grid(256)](buf7, primals_9, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), reinterpret_tensor(buf3, (64, 256), (256, 1), 0 ), reinterpret_tensor(buf5, (64, 256), (256, 1), 0 ), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10 class ActorNetNew(nn.Module): """ Actor Network """ def __init__(self, state_num, action_num, hidden1=256, hidden2=256, hidden3=256): """ :param state_num: number of states :param action_num: number of actions :param hidden1: hidden layer 1 dimension :param hidden2: hidden layer 2 dimension :param hidden3: hidden layer 3 dimension """ super(ActorNetNew, self).__init__() self.fc1 = nn.Linear(state_num, hidden1) self.fc2 = nn.Linear(hidden1, hidden2) self.fc3 = nn.Linear(hidden2, hidden3) self.fc4 = nn.Linear(hidden3, action_num) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Kanaderu/spiking-ddpg-mapless-navigation
ActorNet
false
8384
[ "MIT" ]
29
2b5e7e67385dee4428b8036bc4ffe95e812b34e0
https://github.com/Kanaderu/spiking-ddpg-mapless-navigation/tree/2b5e7e67385dee4428b8036bc4ffe95e812b34e0
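A minimal smoke-test sketch for the ActorNet record above (not part of the original dump), assuming a CUDA device and the guard-pinned (4, 4, 4, 4) state shape; the assertions only check the output shape and the sigmoid range.

# Hypothetical usage sketch; ActorNetNew is the compiled module defined above.
import torch

if torch.cuda.is_available():
    net = ActorNetNew(state_num=4, action_num=4).cuda()
    state = torch.rand(4, 4, 4, 4, device='cuda')
    action = net(state)  # three fused ReLU layers, then the fused sigmoid kernel
    assert action.shape == (4, 4, 4, 4)
    assert (action >= 0).all() and (action <= 1).all()  # sigmoid output range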
LT
import torch class LT(torch.nn.Module): def __init__(self): super(LT, self).__init__() def forward(self, x, y): return x < y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_lt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 < tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_lt_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class LTNew(torch.nn.Module): def __init__(self): super(LTNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bunderhi/torch2trt
LT
false
1600
[ "MIT" ]
0
fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
https://github.com/bunderhi/torch2trt/tree/fa5e31e742a0f0c9a9ee38909a6fa56bb07ba96d
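A minimal smoke-test sketch for the LT record above (not part of the original dump): the fused elementwise comparison is deterministic, so on a CUDA device it should match eager torch.lt bit-for-bit.

# Hypothetical usage sketch; LTNew is the compiled module defined above.
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    out = LTNew()(x, y)  # runs triton_poi_fused_lt_0
    assert out.dtype == torch.bool
    assert torch.equal(out, x < y)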
Encoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/wv/cwvsxo4q6wyoxpozsubbimmg6xvl34ow44hy6yl5mwa23uuy77sa.py # Topologically Sorted Source Nodes: [mixture_w], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # mixture_w => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), primals_2, stride=(2,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1), (4, 1, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [mixture_w], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, buf2, 16, grid=grid(16), stream=stream0) return (buf1, primals_2, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), primals_2, stride=(2,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1), (4, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, primals_2, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf2 class EncoderNew(nn.Module): """Estimation of the nonnegative mixture weight by a 1-D conv layer. """ def __init__(self, L, N): super(EncoderNew, self).__init__() self.L, self.N = L, N self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2, bias= False) def forward(self, input_0): primals_2 = self.conv1d_U.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
zhangxinaaaa/Conv-TasNet
Encoder
false
11037
[ "MIT" ]
0
4622d93d0b9dbe23584addd4f4b9463255651652
https://github.com/zhangxinaaaa/Conv-TasNet/tree/4622d93d0b9dbe23584addd4f4b9463255651652
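A minimal smoke-test sketch for the Encoder record above (not part of the original dump), assuming a CUDA device and the guard-pinned (4, 4) mixture shape; with kernel_size L=4 and stride L//2=2, the conv emits a single frame, and the fused ReLU makes the mixture weights nonnegative.

# Hypothetical usage sketch; EncoderNew is the compiled module defined above.
import torch

if torch.cuda.is_available():
    enc = EncoderNew(L=4, N=4).cuda()
    mixture = torch.rand(4, 4, device='cuda')
    w = enc(mixture)  # extern conv1d, then the fused in-place ReLU kernel
    assert w.shape == (4, 4, 1)
    assert (w >= 0).all()  # ReLU output is nonnegative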
UFOAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/oh/cohi5innigfnlg3a4vz3tzzxnyrdwyi342o5yrghstooasfjvhet.py # Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone] # Source node to ATen node mapping: # kv => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 
= xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/zo/czomnz74isoxx6up5u2hmx722cdztmgay2wiq7c54b4ihien6mpt.py # Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone] # Source node to ATen node mapping: # kv => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/t2/ct2mzc2szheskf3r6msdggseeexrinz2uczzpn6k3pwvv6vc2vio.py # Topologically Sorted Source Nodes: [norm_tensor, mul, kv_norm], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.div] # Source node to ATen node mapping: # kv_norm => div # mul => mul # norm_tensor => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_11, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, %primals_10), kwargs = 
{}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %pow_2), kwargs = {}) triton_poi_fused_div_linalg_vector_norm_mul_2 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_linalg_vector_norm_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x2 = (xindex // 16) % 4 x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4*x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x5)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x5)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x5)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp2 / tmp14 tl.store(out_ptr0 + (x4), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/h6/ch65kkrcljiqxk6uhmtskijz2ynwfqf7gggfxjtnlm2uhjjju6sb.py # Topologically Sorted Source Nodes: [norm_tensor_1, mul_1, q_norm, matmul_1], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.div, aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_2 # mul_1 => mul_1 # norm_tensor_1 => pow_3, pow_4, sum_2 # q_norm => div_1 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%permute_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_1, %primals_10), kwargs = {}) # %div_1 : [num_users=1] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %pow_4), kwargs = {}) # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_div_linalg_vector_norm_mul_3 = async_compile.triton('triton_poi_fused_clone_div_linalg_vector_norm_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_div_linalg_vector_norm_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_div_linalg_vector_norm_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 4) % 4 x5 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4*x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x5)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x5)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x5)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp2 / tmp14 tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/6b/c6busvilz5nn36jjet3bmw7cqddirh4sgalamjr3fsrp3sbsacfi.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, ), (1, )) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16, ), (1, )) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16, ), (1, )) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone] stream0 = get_raw_stream(0) 
triton_poi_fused_clone_0.run(buf1, primals_6, buf3, 64, 4, grid=grid(64, 4), stream=stream0) del primals_6 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf2, primals_8, buf4, 256, grid=grid(256), stream=stream0) del primals_8 buf5 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [kv], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm_tensor, mul, kv_norm], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.div] triton_poi_fused_div_linalg_vector_norm_mul_2.run(buf5, primals_10, buf6, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm_tensor_1, mul_1, q_norm, matmul_1], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.div, aten.clone] triton_poi_fused_clone_div_linalg_vector_norm_mul_3.run(buf0, primals_10, buf7, 256, grid=grid(256), stream=stream0) buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf8, buf9, 256, grid=grid(256), stream=stream0) del buf8 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_12, reinterpret_tensor(buf9, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf10) del primals_12 return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), primals_10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf5, buf6, reinterpret_tensor(buf9, (16, 16), (16, 1), 0), primals_11, reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', 
dtype=torch.float32) primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_div_linalg_vector_norm_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x2 = xindex // 16 % 4 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp2 / tmp14 tl.store(out_ptr0 + x4, tmp15, xmask) @triton.jit def triton_poi_fused_clone_div_linalg_vector_norm_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x5 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = 
tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp2 / tmp14 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp15, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](buf1, primals_6, buf3, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_6 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_clone_1[grid(256)](buf2, primals_8, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf5 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_linalg_vector_norm_mul_2[grid(256)](buf5, primals_10, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_div_linalg_vector_norm_mul_3[grid(256)](buf0, primals_10, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(256)](buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_12, reinterpret_tensor(buf9, (16, 16), (16, 1), 0), 
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf10) del primals_12 return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), primals_10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf5, buf6, reinterpret_tensor(buf9, (16, 16), (16, 1), 0 ), primals_11, reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0) def XNorm(x, gamma): norm_tensor = torch.norm(x, 2, -1, True) return x * gamma / norm_tensor class UFOAttentionNew(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(UFOAttentionNew, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.dropout = nn.Dropout(dropout) self.gamma = nn.Parameter(torch.randn((1, h, 1, 1))) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, input_0, input_1, input_2): primals_10 = self.gamma primals_3 = self.fc_q.weight primals_4 = self.fc_q.bias primals_5 = self.fc_k.weight primals_6 = self.fc_k.bias primals_7 = self.fc_v.weight primals_8 = self.fc_v.bias primals_11 = self.fc_o.weight primals_12 = self.fc_o.bias primals_1 = input_0 primals_2 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
LiChengChen666/DetectDee
UFOAttention
false
9835
[ "Apache-2.0" ]
0
1e6aaa0d15b1fc12d1342d8a922004e372b5f437
https://github.com/LiChengChen666/DetectDee/tree/1e6aaa0d15b1fc12d1342d8a922004e372b5f437
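A note on the UFOAttention entry above: in place of a softmax over the attention logits, it applies XNorm (a per-head scale gamma divided by the last-dim L2 norm) to both the key-value product and the queries; triton_poi_fused_div_linalg_vector_norm_mul_2 and triton_poi_fused_clone_div_linalg_vector_norm_mul_3 inline exactly that norm. A minimal sanity check of the XNorm definition, with illustrative rather than traced shapes:

import torch

def XNorm(x, gamma):
    # scale by gamma, then divide by the L2 norm along the last dim
    norm_tensor = torch.norm(x, 2, -1, True)
    return x * gamma / norm_tensor

x = torch.randn(2, 4, 3, 4)        # (batch, heads, tokens, d_k)
gamma = torch.randn(1, 4, 1, 1)    # one learned scale per head
out = XNorm(x, gamma)
# after undoing gamma, every last-dim row has unit L2 norm
print(torch.allclose(torch.norm(out / gamma, 2, -1), torch.ones(2, 4, 3)))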
ClippedLinearQuantization
import torch from torch.optim.lr_scheduler import * import torch.optim import torch.nn as nn import torch.optim.lr_scheduler import torch.nn.parallel import torch.utils.data import torch.onnx import torch.testing def linear_dequantize(input, scale, zero_point, inplace=False): if inplace: input.add_(zero_point).div_(scale) return input return (input + zero_point) / scale def linear_quantize(input, scale, zero_point, inplace=False): if inplace: input.mul_(scale).sub_(zero_point).round_() return input return torch.round(scale * input - zero_point) def _prep_saturation_val_tensor(sat_val): is_scalar = not isinstance(sat_val, torch.Tensor) out = torch.tensor(sat_val) if is_scalar else sat_val.clone().detach() if not out.is_floating_point(): out = out if out.dim() == 0: out = out.unsqueeze(0) return is_scalar, out def asymmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, integral_zero_point=True, signed=False): scalar_min, sat_min = _prep_saturation_val_tensor(saturation_min) scalar_max, sat_max = _prep_saturation_val_tensor(saturation_max) is_scalar = scalar_min and scalar_max if scalar_max and not scalar_min: sat_max = sat_max elif scalar_min and not scalar_max: sat_min = sat_min if any(sat_min > sat_max): raise ValueError('saturation_min must be smaller than saturation_max') n = 2 ** num_bits - 1 sat_min = torch.min(sat_min, torch.zeros_like(sat_min)) sat_max = torch.max(sat_max, torch.zeros_like(sat_max)) diff = sat_max - sat_min diff[diff == 0] = n scale = n / diff zero_point = scale * sat_min if integral_zero_point: zero_point = zero_point.round() if signed: zero_point += 2 ** (num_bits - 1) if is_scalar: return scale.item(), zero_point.item() return scale, zero_point def clamp(input, min, max, inplace=False): if inplace: input.clamp_(min, max) return input return torch.clamp(input, min, max) class LinearQuantizeSTE(torch.autograd.Function): @staticmethod def forward(ctx, input, scale, zero_point, dequantize, inplace): if inplace: ctx.mark_dirty(input) output = linear_quantize(input, scale, zero_point, inplace) if dequantize: output = linear_dequantize(output, scale, zero_point, inplace) return output @staticmethod def backward(ctx, grad_output): return grad_output, None, None, None, None class ClippedLinearQuantization(nn.Module): def __init__(self, num_bits, clip_val, dequantize=True, inplace=False): super(ClippedLinearQuantization, self).__init__() self.num_bits = num_bits self.clip_val = clip_val self.scale, self.zero_point = asymmetric_linear_quantization_params( num_bits, 0, clip_val, signed=False) self.dequantize = dequantize self.inplace = inplace def forward(self, input): input = clamp(input, 0, self.clip_val, self.inplace) input = LinearQuantizeSTE.apply(input, self.scale, self.zero_point, self.dequantize, self.inplace) return input def __repr__(self): inplace_str = ', inplace' if self.inplace else '' return '{0}(num_bits={1}, clip_val={2}{3})'.format(self.__class__. __name__, self.num_bits, self.clip_val, inplace_str) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_bits': 4, 'clip_val': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.optim.lr_scheduler import * import torch.optim import torch.nn as nn import torch.optim.lr_scheduler import torch.nn.parallel import torch.utils.data import torch.onnx import torch.testing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_clamp_div_mul_round_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 4.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = 3.75 tmp6 = tmp4 * tmp5 tmp7 = tmp6 - tmp1 tmp8 = libdevice.nearbyint(tmp7) tmp9 = tmp8 + tmp1 tmp10 = 0.26666666666666666 tmp11 = tmp9 * tmp10 tl.store(out_ptr0 + x0, tmp11, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_div_mul_round_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def linear_dequantize(input, scale, zero_point, inplace=False): if inplace: input.add_(zero_point).div_(scale) return input return (input + zero_point) / scale def linear_quantize(input, scale, zero_point, inplace=False): if inplace: input.mul_(scale).sub_(zero_point).round_() return input return torch.round(scale * input - zero_point) def _prep_saturation_val_tensor(sat_val): is_scalar = not isinstance(sat_val, torch.Tensor) out = torch.tensor(sat_val) if is_scalar else sat_val.clone().detach() if not out.is_floating_point(): out = out if out.dim() == 0: out = out.unsqueeze(0) return is_scalar, out def asymmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, integral_zero_point=True, signed=False): scalar_min, sat_min = _prep_saturation_val_tensor(saturation_min) scalar_max, sat_max = _prep_saturation_val_tensor(saturation_max) is_scalar = scalar_min and scalar_max if scalar_max and not scalar_min: sat_max = sat_max elif scalar_min and not scalar_max: sat_min = sat_min if any(sat_min > sat_max): raise ValueError('saturation_min must be smaller than saturation_max') n = 2 ** num_bits - 1 sat_min = torch.min(sat_min, torch.zeros_like(sat_min)) sat_max = torch.max(sat_max, torch.zeros_like(sat_max)) diff = sat_max - sat_min diff[diff == 0] = n scale = n / diff zero_point = scale * sat_min if integral_zero_point: zero_point = zero_point.round() if signed: zero_point += 2 ** (num_bits - 1) if is_scalar: return scale.item(), zero_point.item() return scale, zero_point def clamp(input, min, max, inplace=False): if inplace: input.clamp_(min, max) return input return torch.clamp(input, min, max) class LinearQuantizeSTE(torch.autograd.Function): @staticmethod def forward(ctx, input, scale, zero_point, dequantize, inplace): if inplace: ctx.mark_dirty(input) output = linear_quantize(input, scale, zero_point, inplace) if dequantize: output = linear_dequantize(output, scale, zero_point, inplace) return output @staticmethod def 
backward(ctx, grad_output): return grad_output, None, None, None, None class ClippedLinearQuantizationNew(nn.Module): def __init__(self, num_bits, clip_val, dequantize=True, inplace=False): super(ClippedLinearQuantizationNew, self).__init__() self.num_bits = num_bits self.clip_val = clip_val self.scale, self.zero_point = asymmetric_linear_quantization_params( num_bits, 0, clip_val, signed=False) self.dequantize = dequantize self.inplace = inplace def __repr__(self): inplace_str = ', inplace' if self.inplace else '' return '{0}(num_bits={1}, clip_val={2}{3})'.format(self.__class__. __name__, self.num_bits, self.clip_val, inplace_str) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
HatsuneMiku4/distiller
ClippedLinearQuantization
false
2390
[ "Apache-2.0" ]
0
8fbacb01ebcb7d70c5d3ecb6a88093e6c4d42137
https://github.com/HatsuneMiku4/distiller/tree/8fbacb01ebcb7d70c5d3ecb6a88093e6c4d42137
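Two remarks on the ClippedLinearQuantization entry. First, the constants baked into triton_poi_fused_add_clamp_div_mul_round_sub_0 follow directly from asymmetric_linear_quantization_params(num_bits=4, saturation_min=0, saturation_max=4): n = 2**4 - 1 = 15 levels, scale = 15 / 4 = 3.75, zero_point = 0, and the dequantize step multiplies by 1 / 3.75 = 0.26666666666666666. Second, the statements "out = out" and "sat_max = sat_max" in the helper functions are no-ops, most likely remnants of stripped dtype/device conversions in the upstream distiller code; for the scalar inputs used here they do not change the math. A round-trip check of the fused arithmetic:

import torch

x = torch.rand(4, 4, 4, 4) * 8 - 2      # deliberately outside [0, 4]
clamped = torch.clamp(x, 0.0, 4.0)
q = torch.round(3.75 * clamped - 0.0)   # linear_quantize: scale 3.75, zero_point 0
deq = (q + 0.0) / 3.75                  # linear_dequantize
assert torch.unique(deq).numel() <= 16            # 4 bits: at most 16 levels
assert (deq - clamped).abs().max() <= 0.5 / 3.75  # error at most half a quantization step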
PrototypicalNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/hp/chptg55ofbvcnhmdfgvcjqku736b3xcyxeahxk2eb2jpz7f2wey2.py # Topologically Sorted Source Nodes: [prototypes_1], Original ATen: [aten.div] # Source node to ATen node mapping: # prototypes_1 => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), xmask, 
eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(in_out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/sa/csatjxzzljvzrbsywuhl5u6duvaywsdqxfdcvhr7vxe6wtpp43gn.py # Topologically Sorted Source Nodes: [neg, add, mul_2, dist, dist_1, scores_cls], Original ATen: [aten.neg, aten.add, aten.mul, aten.sub, aten.div] # Source node to ATen node mapping: # add => add # dist => sub # dist_1 => div_1 # mul_2 => mul_2 # neg => neg # scores_cls => mul_3 # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%primals_4,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, %expand_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm_1, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul_2), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 4), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %div_1), kwargs = {}) triton_poi_fused_add_div_mul_neg_sub_1 = async_compile.triton('triton_poi_fused_add_div_mul_neg_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_neg_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (4*x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x4)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x4)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x4)), xmask, 
eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_out_ptr0 + (x3), xmask) tmp29 = tl.load(in_ptr2 + (0)) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = 0.25 tmp28 = tmp26 * tmp27 tmp31 = -tmp30 tmp32 = tmp31 * tmp28 tl.store(in_out_ptr0 + (x3), tmp28, xmask) tl.store(out_ptr0 + (x3), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [prototypes], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), primals_1, out=buf0) del primals_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [prototypes_1], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [AB], Original ATen: [aten.bmm] extern_kernels.bmm(primals_3, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = buf2; del buf2 # reuse buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [neg, add, mul_2, dist, dist_1, scores_cls], Original ATen: [aten.neg, aten.add, aten.mul, aten.sub, aten.div] triton_poi_fused_add_div_mul_neg_sub_1.run(buf4, primals_3, buf1, primals_4, buf5, 64, grid=grid(64), stream=stream0) del buf1 del primals_3 del primals_4 return (buf5, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(in_out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_add_div_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + 4 * x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x4), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_out_ptr0 + x3, xmask) tmp29 = tl.load(in_ptr2 + 0) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = 0.25 tmp28 = tmp26 * tmp27 tmp31 = -tmp30 tmp32 = tmp31 * tmp28 tl.store(in_out_ptr0 + x3, tmp28, xmask) tl.store(out_ptr0 + x3, tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), primals_1, out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
extern_kernels.bmm(primals_3, reinterpret_tensor(buf1, (4, 4, 4), ( 16, 1, 4), 0), out=buf2) buf3 = buf2 del buf2 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_neg_sub_1[grid(64)](buf4, primals_3, buf1, primals_4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del primals_3 del primals_4 return buf5, buf4 def L2SquareDist(A, B, average=True): assert A.dim() == 3 assert B.dim() == 3 assert A.size(0) == B.size(0) and A.size(2) == B.size(2) nB = A.size(0) Na = A.size(1) Nb = B.size(1) nC = A.size(2) AB = torch.bmm(A, B.transpose(1, 2)) AA = (A * A).sum(dim=2, keepdim=True).view(nB, Na, 1) BB = (B * B).sum(dim=2, keepdim=True).view(nB, 1, Nb) dist = AA.expand_as(AB) + BB.expand_as(AB) - 2 * AB if average: dist = dist / nC return dist class PrototypicalNetworkNew(nn.Module): def __init__(self, opt): super(PrototypicalNetworkNew, self).__init__() scale_cls = opt['scale_cls'] if 'scale_cls' in opt else 1.0 self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls), requires_grad=True) def forward(self, input_0, input_1, input_2): primals_4 = self.scale_cls primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Basasuya/FewShotWithoutForgetting
PrototypicalNetwork
false
3469
[ "MIT" ]
0
eecc70e416ed82999124ddfca1b145f6dbcd74a6
https://github.com/Basasuya/FewShotWithoutForgetting/tree/eecc70e416ed82999124ddfca1b145f6dbcd74a6
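The PrototypicalNetwork trace fuses two pieces of the L2SquareDist arithmetic: triton_poi_fused_div_0 divides each row of bmm(labels^T, features) by a per-class example count (the four summed loads walk down one column of what is presumably a one-hot label matrix), and triton_poi_fused_add_div_mul_neg_sub_1 expands the squared Euclidean distance as ||a||^2 + ||b||^2 - 2*a.b, divides by the feature dimension (nC = 4, hence the 0.25 constant), and multiplies by -scale_cls. A small equivalence check of that identity against torch.cdist:

import torch

A = torch.randn(2, 5, 4)   # (batch, n_queries, feat)
B = torch.randn(2, 3, 4)   # (batch, n_prototypes, feat)
AB = torch.bmm(A, B.transpose(1, 2))
AA = (A * A).sum(dim=2, keepdim=True)                  # (2, 5, 1)
BB = (B * B).sum(dim=2, keepdim=True).transpose(1, 2)  # (2, 1, 3)
dist = (AA + BB - 2 * AB) / A.size(2)
ref = torch.cdist(A, B).pow(2) / A.size(2)
print(torch.allclose(dist, ref, atol=1e-5))            # True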
EncoderLayer
import math import torch from torch import nn import torch.nn.functional as F import torch.utils.data.distributed def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class FeedForward(nn.Module): def __init__(self, d_model, d_hidden): super().__init__() self.linear1 = nn.Linear(d_model, d_hidden) self.linear2 = nn.Linear(d_hidden, d_model) def forward(self, x): return self.linear2(F.relu(self.linear1(x))) class Attention(nn.Module): def __init__(self, d_key, drop_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(drop_ratio) self.causal = causal def forward(self, query, key, value): dot_products = matmul(query, key.transpose(1, 2)) if query.dim() == 3 and (self is None or self.causal): tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF if key.is_cuda: tri = tri dot_products.data.sub_(tri.unsqueeze(0)) return matmul(self.dropout(F.softmax(dot_products / self.scale, dim =-1)), value) class MultiHead(nn.Module): def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False): super().__init__() self.attention = Attention(d_key, drop_ratio, causal=causal) self.wq = nn.Linear(d_key, d_key, bias=False) self.wk = nn.Linear(d_key, d_key, bias=False) self.wv = nn.Linear(d_value, d_value, bias=False) self.wo = nn.Linear(d_value, d_key, bias=False) self.n_heads = n_heads def forward(self, query, key, value): query, key, value = self.wq(query), self.wk(key), self.wv(value) query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key, value)) return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in zip(query, key, value)], -1)) class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta class ResidualBlock(nn.Module): def __init__(self, layer, d_model, drop_ratio): super().__init__() self.layer = layer self.dropout = nn.Dropout(drop_ratio) self.layernorm = LayerNorm(d_model) def forward(self, *x): return self.layernorm(x[0] + self.dropout(self.layer(*x))) class EncoderLayer(nn.Module): def __init__(self, d_model, d_hidden, n_heads, drop_ratio): super().__init__() self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads, drop_ratio), d_model, drop_ratio) self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden), d_model, drop_ratio) def forward(self, x): return self.feedforward(self.selfattn(x, x, x)) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_hidden': 4, 'n_heads': 4, 'drop_ratio': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn import torch.nn.functional as F import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tmp0 * tmp5 tmp8 = libdevice.sqrt(tmp7) tmp9 = 1e-06 tmp10 = tmp8 + tmp9 tmp11 = tmp6 / tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6) buf7 = buf4 del buf4 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10) buf11 = buf8 del buf8 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), 
(16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14) buf15 = buf12 del buf12 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = buf15 del buf15 triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18) buf19 = buf16 del buf16 triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf14 buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf20) buf21 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0) del buf6 buf22 = buf21 del buf21 buf23 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0) del buf18 triton_poi_fused_add_mean_std_3[grid(16)](buf22, primals_2, buf20, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1) buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_6, primals_2, buf20, buf23, buf22, primals_7, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf22 del buf23 del primals_7 buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf25) buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0) del buf25 buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(64)](buf26, primals_9, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf27) buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0) del buf27 triton_poi_fused_add_6[grid(64)](buf28, buf24, primals_11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_12, buf28, primals_13, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_13 return (buf29, primals_2, primals_6, primals_12, buf5, buf9, buf13, buf17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20, reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor( buf26, (16, 4), (4, 1), 0), buf28, primals_10, buf30, primals_8, primals_5, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), 
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0)) def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class FeedForward(nn.Module): def __init__(self, d_model, d_hidden): super().__init__() self.linear1 = nn.Linear(d_model, d_hidden) self.linear2 = nn.Linear(d_hidden, d_model) def forward(self, x): return self.linear2(F.relu(self.linear1(x))) class Attention(nn.Module): def __init__(self, d_key, drop_ratio, causal): super().__init__() self.scale = math.sqrt(d_key) self.dropout = nn.Dropout(drop_ratio) self.causal = causal def forward(self, query, key, value): dot_products = matmul(query, key.transpose(1, 2)) if query.dim() == 3 and (self is None or self.causal): tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF if key.is_cuda: tri = tri dot_products.data.sub_(tri.unsqueeze(0)) return matmul(self.dropout(F.softmax(dot_products / self.scale, dim =-1)), value) class MultiHead(nn.Module): def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False): super().__init__() self.attention = Attention(d_key, drop_ratio, causal=causal) self.wq = nn.Linear(d_key, d_key, bias=False) self.wk = nn.Linear(d_key, d_key, bias=False) self.wv = nn.Linear(d_value, d_value, bias=False) self.wo = nn.Linear(d_value, d_key, bias=False) self.n_heads = n_heads def forward(self, query, key, value): query, key, value = self.wq(query), self.wk(key), self.wv(value) query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key, value)) return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in zip(query, key, value)], -1)) class LayerNorm(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta class ResidualBlock(nn.Module): def __init__(self, layer, d_model, drop_ratio): super().__init__() self.layer = layer self.dropout = nn.Dropout(drop_ratio) self.layernorm = LayerNorm(d_model) def forward(self, *x): return self.layernorm(x[0] + self.dropout(self.layer(*x))) class EncoderLayerNew(nn.Module): def __init__(self, d_model, d_hidden, n_heads, drop_ratio): super().__init__() self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads, drop_ratio), d_model, drop_ratio) self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden), d_model, drop_ratio) def forward(self, input_0): primals_1 = self.selfattn.layer.wq.weight primals_3 = self.selfattn.layer.wk.weight primals_4 = self.selfattn.layer.wv.weight primals_5 = self.selfattn.layer.wo.weight primals_6 = self.selfattn.layernorm.gamma primals_7 = self.selfattn.layernorm.beta primals_8 = self.feedforward.layer.linear1.weight primals_9 = self.feedforward.layer.linear1.bias primals_10 = self.feedforward.layer.linear2.weight primals_11 = self.feedforward.layer.linear2.bias primals_12 = self.feedforward.layernorm.gamma primals_13 = self.feedforward.layernorm.beta primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
matatabinoneko/densecap
EncoderLayer
false
12776
[ "BSD-3-Clause" ]
0
723d9c2cfd3f16b2eb7584cc7cb0aaef973854dd
https://github.com/matatabinoneko/densecap/tree/723d9c2cfd3f16b2eb7584cc7cb0aaef973854dd
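Two details in the EncoderLayer entry deserve a flag. The causal branch of Attention.forward is broken as written: INF is never defined (upstream presumably defines a large constant such as 1e10) and "if key.is_cuda: tri = tri" is a no-op (presumably "tri = tri.cuda()" originally). Both sit in a branch that is dead for this trace, since EncoderLayer constructs MultiHead with the default causal=False. Separately, this LayerNorm is not nn.LayerNorm: triton_poi_fused_add_mean_std_3 divides the sum of squared deviations by 3.0 (a Bessel-corrected std over 4 features), and eps is added to the std itself rather than to the variance under the square root. A sketch of the difference:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4)
gamma, beta, eps = torch.ones(4), torch.zeros(4), 1e-06
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)      # unbiased: divides by n - 1 = 3
out = gamma * (x - mean) / (std + eps) + beta
ref = F.layer_norm(x, (4,), gamma, beta, eps)
print((out - ref).abs().max())     # clearly nonzero: the two forms differ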
first_conv
import torch import torch.nn as nn import torch.nn.functional as F class first_conv(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False): super(first_conv, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.layer_type = 'FConv2d' self.transform = None def forward(self, x): restore_w = self.weight max = restore_w.data.max() weight_q = restore_w.div(max).mul(127).round().div(127).mul(max) weight_q = (weight_q - restore_w).detach() + restore_w return F.conv2d(x, weight_q, self.bias, self.stride, self.padding, self.dilation, self.groups) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_max_mul_round_sub_0(in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tmp4 = tmp0 / tmp3 tmp5 = 127.0 tmp6 = tmp4 * tmp5 tmp7 = libdevice.nearbyint(tmp6) tmp8 = 0.007874015748031496 tmp9 = tmp7 * tmp8 tmp10 = tmp9 * tmp3 tmp11 = tmp10 - tmp0 tmp12 = tmp11 + tmp0 tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp12, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_max_mul_round_sub_0[grid(1)](primals_1, buf1, 1, 256, num_warps=2, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(primals_2, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) return buf2, primals_2, buf1 class first_convNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False): super(first_convNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.layer_type = 'FConv2d' self.transform = None def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
RuiLin0212/BATMANN
first_conv
false
17854
[ "MIT" ]
6
5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
https://github.com/RuiLin0212/BATMANN/tree/5c5cc3334090fc0442bfd2ffdd41bdcab88cbea2
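The first_conv entry quantizes its weights on the fly: w is divided by max(w), scaled by 127, rounded, and rescaled back (the kernel constant 0.007874015748031496 is just 1/127). In the fused forward the trailing "(weight_q - restore_w).detach() + restore_w" cancels to weight_q; its real purpose is the backward pass, where it makes the rounding act as the identity for gradients, i.e. a straight-through estimator. A minimal check of that gradient behaviour:

import torch

w = torch.randn(4, 4, 4, 4, requires_grad=True)
m = w.data.max()                                  # detached, as in the module
w_q = w.div(m).mul(127).round().div(127).mul(m)   # 127 levels per unit of max
w_ste = (w_q - w).detach() + w                    # forward: w_q; backward: identity
w_ste.sum().backward()
print(torch.equal(w.grad, torch.ones_like(w)))    # True: gradient passes straight through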
ActorCritic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/53/c5336tes3fejn37nhb2iijuur7spy3qcasflywbbqklxwgjxpcvr.py # Topologically Sorted Source Nodes: [action_std], Original ATen: [aten.exp] # Source node to ATen node mapping: # action_std => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%expand,), kwargs = {}) triton_poi_fused_exp_1 = async_compile.triton('triton_poi_fused_exp_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, 64), (64, 1)) assert_size_stride(primals_10, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), 
torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [action_mean], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [action_std], Original ATen: [aten.exp] triton_poi_fused_exp_1.run(primals_8, buf5, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_9, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_10 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0), buf5, reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, primals_9, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import copy import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + x2, tmp1, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, 64), (64, 1)) assert_size_stride(primals_10, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_9, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_10 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 
1), 0 ), buf5, reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, primals_9, primals_6, primals_4 class ActorCriticNew(nn.Module): def __init__(self, num_inputs, num_outputs, hidden=64): super(ActorCriticNew, self).__init__() self.affine1 = nn.Linear(num_inputs, hidden) self.affine2 = nn.Linear(hidden, hidden) self.action_mean = nn.Linear(hidden, num_outputs) self.action_mean.weight.data.mul_(0.1) self.action_mean.bias.data.mul_(0.0) self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs)) self.value_head = nn.Linear(hidden, 1) self.module_list_current = [self.affine1, self.affine2, self. action_mean, self.action_log_std, self.value_head] self.module_list_old = [None] * len(self.module_list_current) self.backup() def backup(self): for i in range(len(self.module_list_current)): self.module_list_old[i] = copy.deepcopy(self.module_list_current[i] ) def forward(self, input_0): primals_8 = self.action_log_std primals_1 = self.affine1.weight primals_2 = self.affine1.bias primals_4 = self.affine2.weight primals_5 = self.affine2.bias primals_6 = self.action_mean.weight primals_7 = self.action_mean.bias primals_9 = self.value_head.weight primals_10 = self.value_head.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0], output[1], output[2], output[3]
tpbarron/pytorch-ppo
ActorCritic
false
16,617
[ "MIT" ]
47
f73226865e34443f93dbec58939329c9278828e8
https://github.com/tpbarron/pytorch-ppo/tree/f73226865e34443f93dbec58939329c9278828e8
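For orientation, the compiled call() above performs the same computation as an eager two-layer tanh policy network with a value head: two fused mm + bias + tanh steps (triton_poi_fused_tanh_0), an addmm for the action mean, an exp of the broadcast log-std parameter (triton_poi_fused_exp_1), and an addmm for the scalar value. A minimal eager sketch of that data flow, assuming the attribute names used by ActorCriticNew (not the repo's exact forward):

import torch

def actor_critic_forward(model, x):
    h = torch.tanh(model.affine1(x))                      # mm + fused bias/tanh
    h = torch.tanh(model.affine2(h))                      # second fused tanh
    action_mean = model.action_mean(h)                    # extern_kernels.addmm
    action_log_std = model.action_log_std.expand_as(action_mean)  # zero-stride expand in call()
    action_std = torch.exp(action_log_std)                # triton_poi_fused_exp_1
    value = model.value_head(h)                           # extern_kernels.addmm
    return action_mean, action_log_std, action_std, value  # the four outputs of ActorCriticNew.forward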
LayerNormGRUCell
import torch from typing import Optional import torch.nn.functional as F from torch import nn import torch.utils.data import torch.nn from torch.nn import RNNCellBase import torch.multiprocessing from torch.nn import Identity class LayerNormGRUCell(RNNCellBase): """ Implements GRUCell with layer normalisation and zone-out on top. It inherits the base RNN cell whose trainable weight matrices are used. References: [1] Ba, Jimmy Lei, Jamie Ryan Kiros, and Geoffrey E. Hinton. "Layer Normalization." (2016). [2] Krueger, David, et al. "Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations." (2016). :param input_size: Number of input features to the cell :param hidden_size: Number of hidden states in the cell :param use_layer_norm: If set to True, layer normalisation is applied to reset, update and new tensors before activation. :param dropout: Dropout probability for the hidden states [0,1] """ def __init__(self, input_size: 'int', hidden_size: 'int', use_layer_norm: 'bool'=False, dropout: 'float'=0.0): super(LayerNormGRUCell, self).__init__(input_size, hidden_size, bias=False, num_chunks=3) self.dropout = dropout self.ln_r = nn.LayerNorm(self.hidden_size ) if use_layer_norm else Identity() self.ln_z = nn.LayerNorm(self.hidden_size ) if use_layer_norm else Identity() self.ln_n = nn.LayerNorm(self.hidden_size ) if use_layer_norm else Identity() def forward(self, input: 'torch.Tensor', hx: 'Optional[torch.Tensor]'=None ) ->torch.Tensor: if hx is None: hx = input.new_zeros(size=(input.size(0), self.hidden_size), requires_grad=False) ih = input.mm(self.weight_ih.t()) hh = hx.mm(self.weight_hh.t()) i_r, i_z, i_n = ih.chunk(3, dim=1) h_r, h_z, h_n = hh.chunk(3, dim=1) r = torch.sigmoid(self.ln_r(i_r + h_r)) z = torch.sigmoid(self.ln_z(i_z + h_z)) n = torch.tanh(self.ln_n(i_n + r * h_n)) new_h = (torch.tensor(1.0) - z) * n + z * hx if self.dropout > 0.0: bernoulli_mask = F.dropout(torch.ones_like(new_h), p=self. dropout, training=bool(self.training)) new_h = bernoulli_mask * new_h + (torch.tensor(1.0) - bernoulli_mask ) * hx return new_h class Identity(nn.Module): """ Implements an identity torch module where input is passed as it is to output. There are no parameters in the module. """ def __init__(self) ->None: super(Identity, self).__init__() def forward(self, input: 'torch.Tensor') ->torch.Tensor: return input def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data import torch.nn from torch.nn import RNNCellBase import torch.multiprocessing from torch.nn import Identity assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_lift_fresh_mul_sigmoid_sub_tanh_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0 + 12 * x1), xmask) tmp4 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp5 = tl.load(in_ptr1 + (x0 + 12 * x1), xmask) tmp8 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp9 = tl.load(in_ptr1 + (8 + x0 + 12 * x1), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp7 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.tanh(tmp11) tmp13 = 1.0 tmp14 = tmp13 - tmp3 tmp15 = tmp14 * tmp12 tmp16 = 0.0 tmp17 = tmp3 * tmp16 tmp18 = tmp15 + tmp17 tl.store(out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) tl.store(out_ptr2 + x2, tmp12, xmask) tl.store(out_ptr3 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf2) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_lift_fresh_mul_sigmoid_sub_tanh_1[grid(16)](buf1, buf2, buf4, buf3, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 return buf6, primals_1, buf0, reinterpret_tensor(buf2, (4, 4), (12, 1), 8 ), buf3, buf4, buf5 class LayerNormGRUCellNew(RNNCellBase): """ Implements GRUCell with layer normalisation and zone-out on top. It inherits the base RNN cell whose trainable weight matrices are used. References: [1] Ba, Jimmy Lei, Jamie Ryan Kiros, and Geoffrey E. Hinton. "Layer Normalization." (2016). [2] Krueger, David, et al. 
"Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations." (2016). :param input_size: Number of input features to the cell :param hidden_size: Number of hidden states in the cell :param use_layer_norm: If set to True, layer normalisation is applied to reset, update and new tensors before activation. :param dropout: Dropout probability for the hidden states [0,1] """ def __init__(self, input_size: 'int', hidden_size: 'int', use_layer_norm: 'bool'=False, dropout: 'float'=0.0): super(LayerNormGRUCellNew, self).__init__(input_size, hidden_size, bias=False, num_chunks=3) self.dropout = dropout self.ln_r = nn.LayerNorm(self.hidden_size ) if use_layer_norm else Identity() self.ln_z = nn.LayerNorm(self.hidden_size ) if use_layer_norm else Identity() self.ln_n = nn.LayerNorm(self.hidden_size ) if use_layer_norm else Identity() def forward(self, input_0): primals_2 = self.weight_ih primals_3 = self.weight_hh primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0] class Identity(nn.Module): """ Implements an identity torch module where input is passed as it is to output. There are no parameters in the module. """ def __init__(self) ->None: super(Identity, self).__init__() def forward(self, input: 'torch.Tensor') ->torch.Tensor: return input
faz1993/InnerEye-DeepLearning
LayerNormGRUCell
false
15,339
[ "MIT" ]
402
fb258d5c9a3ba18565b5a67e7ac1f00127d9ecb9
https://github.com/faz1993/InnerEye-DeepLearning/tree/fb258d5c9a3ba18565b5a67e7ac1f00127d9ecb9
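A short usage sketch for the eager cell above, following the shapes from the record's get_inputs()/get_init_inputs() helpers (the use_layer_norm flag is turned on here for illustration; the record's defaults leave it off):

import torch

cell = LayerNormGRUCell(input_size=4, hidden_size=4, use_layer_norm=True)
x = torch.rand(4, 4)      # (batch, input_size)
h = cell(x)               # hx defaults to zeros; one step of r/z/n gating
assert h.shape == (4, 4)  # (batch, hidden_size)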
StatsNet
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/7r/c7ropiifeibl5ynmvclnzdbugv3jj2chhjkp6274o2examg2m3ba.py # Topologically Sorted Source Nodes: [mean, std], Original ATen: [aten.mean, aten.std] # Source node to ATen node mapping: # mean => mean # std => sqrt, var # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2]), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [2]), kwargs = {correction: 1.0}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) triton_per_fused_mean_std_0 = async_compile.triton('triton_per_fused_mean_std_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_std_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_std_0(in_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 
RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp4 / tmp19 tmp21 = 15.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tl.store(out_ptr2 + (x2 + (8*x3)), tmp20, xmask) tl.store(out_ptr3 + (x2 + (8*x3)), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf6 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf4 = reinterpret_tensor(buf6, (4, 4), (8, 1), 0) # alias buf5 = reinterpret_tensor(buf6, (4, 4), (8, 1), 4) # alias # Topologically Sorted Source Nodes: [mean, std], Original ATen: [aten.mean, aten.std] stream0 = get_raw_stream(0) triton_per_fused_mean_std_0.run(arg0_1, buf4, buf5, 16, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf6, (4, 2, 4), (8, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_std_0(in_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp4 / tmp19 tmp21 = 15.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tl.store(out_ptr2 + (x2 + 8 * x3), tmp20, xmask) tl.store(out_ptr3 + (x2 + 8 * x3), tmp23, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf6 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf4 = reinterpret_tensor(buf6, (4, 4), (8, 1), 0) buf5 = reinterpret_tensor(buf6, (4, 4), (8, 1), 4) get_raw_stream(0) triton_per_fused_mean_std_0[grid(16)](arg0_1, buf4, buf5, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 return reinterpret_tensor(buf6, (4, 2, 4), (8, 4, 1), 0), class StatsNetNew(nn.Module): def __init__(self): super(StatsNetNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
angelicagardner/ensemble-cnn-deepfakes-detection
StatsNet
false
1,438
[ "BSD-3-Clause" ]
0
8740d2317848250249c741e0af5c4cbbe2d8af46
https://github.com/angelicagardner/ensemble-cnn-deepfakes-detection/tree/8740d2317848250249c741e0af5c4cbbe2d8af46
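The persistent reduction above computes, per (batch, channel) pair, the mean and the sample standard deviation (correction=1, hence the 15.0 divisor over 16 spatial elements) and packs them side by side into a (4, 2, 4) view. A plain PyTorch sketch of the same statistic, assuming the (4, 4, 4, 4) benchmark input:

import torch

x = torch.rand(4, 4, 4, 4)
f = x.view(x.size(0), x.size(1), -1)                     # (N, C, H*W) = (4, 4, 16)
out = torch.stack([f.mean(dim=2), f.std(dim=2)], dim=1)  # (4, 2, 4); std defaults to correction=1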
Downsample2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/4l/c4ltwiip5bmesya4c4svpfonkcj7oyyhcppgxyi63olpovjsddwb.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] # Source node to ATen node mapping: # interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6 # Graph fragment: # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (2,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 2.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %sub_2 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 2 x0 = xindex % 2 x2 = (xindex // 4) x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp25 * tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp30 tmp36 = tmp32 + tmp35 tmp37 = tmp24 + tmp31 tmp38 = tmp37 - tmp36 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp29) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tl.store(in_out_ptr0 + (x3), tmp44, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf2 = buf0; del buf0 # reuse buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf3, arg0_1, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import functools import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x2 = xindex // 4 x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp25 * tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp30 tmp36 = tmp32 + tmp35 tmp37 = tmp24 + tmp31 tmp38 = tmp37 - tmp36 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp29) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tl.store(in_out_ptr0 + x3, tmp44, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf2 = buf0 del buf0 buf3 = buf2 del buf2 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (64)](buf3, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf3, class Downsample2dNew(torch.nn.Module): def __init__(self, scale: 'float'=0.5, mode: 'str'='bilinear'): super(Downsample2dNew, self).__init__() self.downsample = functools.partial(torch.nn.functional.interpolate, scale_factor=scale, mode=mode) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ai-in-motion/moai
Downsample2d
false
18,312
[ "Apache-2.0" ]
10
e38cac046c059d2e2331ef4883bbabc5a500a5cf
https://github.com/ai-in-motion/moai/tree/e38cac046c059d2e2331ef4883bbabc5a500a5cf
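The single pointwise kernel above inlines the whole 4x4 -> 2x2 bilinear resample: it maps each output pixel to a source coordinate with the half-pixel rule ((i + 0.5) * 2 - 0.5), clamps, gathers the four neighbours, and interpolates horizontally then vertically. The eager one-liner it replaces, with align_corners=False spelled out to match the kernel's half-pixel arithmetic (the module itself relies on the framework default):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
y = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=False)
assert y.shape == (4, 4, 2, 2)   # what the fused kernel writes into buf3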
RDivInt
import torch class RDivInt(torch.nn.Module): def __init__(self): super(RDivInt, self).__init__() def forward(self, x): return 100 / x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 100.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RDivIntNew(torch.nn.Module): def __init__(self): super(RDivIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PogChamper/torch2trt
RDivInt
false
14,216
[ "MIT" ]
3,363
43b12627ec0de4d212efb6d02b07570205085ccc
https://github.com/PogChamper/torch2trt/tree/43b12627ec0de4d212efb6d02b07570205085ccc
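Note how the reverse division is lowered: Inductor rewrites 100 / x as a reciprocal followed by a multiply (tmp2 = 1 / x, then tmp4 = tmp2 * 100.0), which is where the kernel name fused_mul_reciprocal comes from. A quick equivalence check, with inputs kept away from zero:

import torch

x = torch.rand(4, 4, 4, 4) + 0.1
lhs = 100 / x
rhs = x.reciprocal() * 100.0     # what triton_poi_fused_mul_reciprocal_0 computes
assert torch.allclose(lhs, rhs)  # equal up to floating-point rounding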
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/y2/cy2ahtligm6mxckolwfrfoxrz62xr4hhzcefsobim46u2dekqbro.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # out_2 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (buf2, primals_1, buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, primals_1, buf1, primals_4 class FullyConnectedBlock(nn.Module): def __init__(self, width, bn=False): super().__init__() self.linear = nn.Linear(width, width, bias=not bn) self.bn = bn if bn: self.bn_layer = nn.BatchNorm1d(width) self.relu = nn.ReLU() def forward(self, x): out = self.linear(x) if self.bn: out = self.bn_layer(out) return self.relu(out) class MLPNew(nn.Module): def __init__(self, num_outputs, width, depth, num_inputs, bn=False): super().__init__() self.bn = bn self.linear_first = nn.Linear(num_inputs, width, bias=not self.bn) self.relu = nn.ReLU() self.layers = self._make_layer(FullyConnectedBlock, width, depth - 2, self.bn) self.linear_last = nn.Linear(width, num_outputs) def _make_layer(self, block, width, depth, bn): layers = [] for i in range(depth): layers.append(block(width, bn=bn)) return nn.Sequential(*layers) def forward(self, input_0): primals_1 = self.linear_first.weight primals_3 = self.linear_first.bias primals_2 = self.linear_last.weight primals_5 = self.linear_last.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Arjung27/DeepThinking
MLP
false
16,942
[ "MIT" ]
6
13a2ce534bcb0b9379a22fffef52d975d650adb2
https://github.com/Arjung27/DeepThinking/tree/13a2ce534bcb0b9379a22fffef52d975d650adb2
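The compiled graph corresponds to a depth-2 instantiation, where self.layers is an empty Sequential and the network reduces to linear -> ReLU -> linear: both matmuls go to extern_kernels while the only Triton kernel fuses the first bias add into the ReLU. A hedged eager sketch of that path (parameter names are illustrative, not the repo's API):

import torch

def mlp_depth2(x, w1, b1, w2, b2):
    h = torch.relu(x @ w1.t() + b1)   # mm + triton_poi_fused_relu_0
    return h @ w2.t() + b2            # extern_kernels.addmm

x = torch.rand(4, 4)
params = [torch.rand(4, 4), torch.rand(4), torch.rand(4, 4), torch.rand(4)]
assert mlp_depth2(x, *params).shape == (4, 4)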
SReLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wt/cwtbm7ruscnyd5hf7etfdfwptqhgjo73ir43mntx3mmz3qyymrrg.py # Topologically Sorted Source Nodes: [sub, relu, add], Original ATen: [aten.sub, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # add => add # relu => relu # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_1), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_sub_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_sub_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_sub_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_sub_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tmp4 + tmp1 tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tl.store(out_ptr0 + (x3), tmp5, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [sub, relu, add], Original ATen: [aten.sub, aten.relu, aten.add, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_relu_sub_threshold_backward_0.run(primals_2, primals_1, buf0, buf1, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_relu_sub_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tmp4 + tmp1 tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tl.store(out_ptr0 + x3, tmp5, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_sub_threshold_backward_0[grid(256)](primals_2 , primals_1, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1 ) del primals_1 del primals_2 return buf0, buf1 class SReLUNew(nn.Module): """Shifted ReLU""" def __init__(self, nc): super(SReLUNew, self).__init__() self.srelu_bias = nn.Parameter(torch.Tensor(1, nc, 1, 1)) self.srelu_relu = nn.ReLU(inplace=True) nn.init.constant_(self.srelu_bias, -1.0) def forward(self, input_0): primals_1 = self.srelu_bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
SheffieldAI/pykale
SReLU
false
14,399
[ "MIT" ]
324
be7670941fb06835883c80477b26702d407017db
https://github.com/SheffieldAI/pykale/tree/be7670941fb06835883c80477b26702d407017db
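A minimal eager-PyTorch sketch (illustrative only, not part of the pykale entry above) of what triton_poi_fused_add_relu_sub_threshold_backward_0 computes in one pass: the shifted ReLU out = relu(x - b) + b, with the bias b initialised to -1.0, plus the boolean mask kept for threshold_backward. Shapes follow the asserts in call().

import torch

x = torch.randn(4, 4, 4, 4)            # primals_2
b = torch.full((1, 4, 1, 1), -1.0)     # srelu_bias after nn.init.constant_
out = torch.relu(x - b) + b            # written to out_ptr0 (buf0)
mask = torch.relu(x - b) <= 0          # written to out_ptr1 (buf1), reused by the backward pass
print(out.shape, mask.dtype)           # torch.Size([4, 4, 4, 4]) torch.bool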
SoftIoU
import torch import torch.nn as nn import torch._utils class SoftIoU(nn.Module): def __init__(self, from_sigmoid=False, ignore_label=-1): super().__init__() self._from_sigmoid = from_sigmoid self._ignore_label = ignore_label def forward(self, pred, label): label = label.view(pred.size()) sample_weight = label != self._ignore_label if not self._from_sigmoid: pred = torch.sigmoid(pred) loss = 1.0 - torch.sum(pred * label * sample_weight, dim=(1, 2, 3)) / ( torch.sum(torch.max(pred, label) * sample_weight, dim=(1, 2, 3) ) + 1e-08) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = -1.0 tmp5 = tmp2 != tmp4 tmp6 = tmp5.to(tl.float32) tmp7 = tmp3 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = triton_helpers.maximum(tmp1, tmp2) tmp13 = tmp12 * tmp6 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = 1e-08 tmp19 = tmp17 + tmp18 tmp20 = tmp11 / tmp19 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp22, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0[grid(4)]( buf2, arg1_1, arg0_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class SoftIoUNew(nn.Module): def __init__(self, from_sigmoid=False, ignore_label=-1): super().__init__() self._from_sigmoid = from_sigmoid self._ignore_label = ignore_label def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
aagaard/ritm_interactive_segmentation
SoftIoU
false
12,034
[ "MIT" ]
0
c68b45a54e99eb5401f50e62f7e43a11e34964ee
https://github.com/aagaard/ritm_interactive_segmentation/tree/c68b45a54e99eb5401f50e62f7e43a11e34964ee
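For reference, the eager computation that triton_per_fused_add_div_maximum_mul_ne_rsub_sigmoid_sum_0 fuses into a single per-batch reduction. A sketch using the shapes from get_inputs(); note the ignore mask is all ones here because torch.rand never produces -1.

import torch

pred = torch.rand(4, 4, 4, 4)
label = torch.rand(4, 4, 4, 4)
w = (label != -1).float()                        # sample_weight
p = torch.sigmoid(pred)                          # from_sigmoid=False path
loss = 1.0 - (p * label * w).sum(dim=(1, 2, 3)) / (
    (torch.max(p, label) * w).sum(dim=(1, 2, 3)) + 1e-08)
print(loss.shape)                                # torch.Size([4]), one loss per sample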
BinaryClassificationHead
from _paritybench_helpers import _mock_config import torch class BinaryClassificationHead(torch.nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size) self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.out_proj = torch.nn.Linear(config.hidden_size, 1) def init_weights(self): self.dense.weight.data.normal_(mean=0.0, std=self.config. initializer_range) if self.dense.bias is not None: self.dense.bias.data.zero_() def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_5 return reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4 class BinaryClassificationHeadNew(torch.nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size) self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.out_proj = torch.nn.Linear(config.hidden_size, 1) def init_weights(self): self.dense.weight.data.normal_(mean=0.0, std=self.config. initializer_range) if self.dense.bias is not None: self.dense.bias.data.zero_() def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
BunnyNoBugs/DeepPavlov
BinaryClassificationHead
false
12,322
[ "Apache-2.0" ]
0
b2213db633a669d27d6f745dd780530574ccf8b5
https://github.com/BunnyNoBugs/DeepPavlov/tree/b2213db633a669d27d6f745dd780530574ccf8b5
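A usage sketch for the head above, assuming the conventional (batch, seq_len, hidden) input rather than the 4D tensor from get_inputs(); dropout is the identity at inference and therefore does not appear in the traced call() graph.

import torch

features = torch.rand(4, 4, 4)                   # (batch, seq_len, hidden_size=4)
dense = torch.nn.Linear(4, 4)
out_proj = torch.nn.Linear(4, 1)
x = torch.tanh(dense(features[:, 0, :]))         # pool the first ([CLS]) position
logits = out_proj(x)
print(logits.shape)                              # torch.Size([4, 1])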
Gather
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ap/cap2zw6ekgtbbwzmu32cgymplxiqtbms4gchuehkmedzncuuf2hu.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_per_fused__softmax_0.run(arg1_1, buf2, 16, 16, grid=grid(16), stream=stream0) del arg1_1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [gather_result], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf2, (4, 16, 4), (64, 1, 16), 0), out=buf3) del arg0_1 del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(16)](arg1_1, buf2, 16, 16, XBLOCK= 8, num_warps=2, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf2, (4, 16, 4), (64, 1, 16), 0), out=buf3) del arg0_1 del buf2 return buf3, class GatherNew(torch.nn.Module): """ gather """ @staticmethod def modify_commandline_options(parser, is_train): return parser def __init__(self, F, K, use_mask=False): super().__init__() self.K = K self.F = F self.softmax_2 = nn.Softmax(dim=2) self.use_mask = use_mask if use_mask: self.a = nn.Parameter(torch.randn(F, K), requires_grad=True) """image_codeing [N,F,H,W] s_att (spatial attention): [N,K,H,W] K-how many segment s_att(0 or 50) feature: [N,K,F] """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jhp038/fashion_project
Gather
false
3,718
[ "MIT" ]
0
719533dc60155801f567e6a9183d7a5036ee1166
https://github.com/jhp038/fashion_project/tree/719533dc60155801f567e6a9183d7a5036ee1166
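A sketch of the two-step gather that the compiled call() performs: a softmax over the flattened spatial positions of each attention map (the Triton kernel), then a batched matmul that pools features per segment (the extern bmm). Names follow the docstring in the entry above.

import torch

feat = torch.rand(4, 4, 4, 4)                    # image coding [N, F, H, W]
att = torch.rand(4, 4, 4, 4)                     # spatial attention [N, K, H, W]
N, F, H, W = feat.shape
K = att.shape[1]
a = torch.softmax(att.view(N, K, H * W), dim=2)  # triton_per_fused__softmax_0
pooled = torch.bmm(feat.view(N, F, H * W), a.transpose(1, 2))
print(pooled.shape)                              # torch.Size([4, 4, 4]) = [N, F, K]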
FM
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/5v/c5vqsotjiyyydjenxs3ttprtusysjuigcmpcuavw4a5cbsh4movc.py # Topologically Sorted Source Nodes: [sum_1, square_of_sum, mul, sum_of_square, cross_term, sum_3, cross_term_1], Original ATen: [aten.sum, aten.pow, aten.mul, aten.sub] # Source node to ATen node mapping: # cross_term => sub # cross_term_1 => mul_1 # mul => mul # square_of_sum => pow_1 # sum_1 => sum_1 # sum_3 => sum_3 # sum_of_square => sum_2 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1], True), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, %sum_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [2]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 0.5), kwargs = {}) triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 
16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + (x2), tmp68, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [sum_1, square_of_sum, mul, sum_of_square, cross_term, sum_3, cross_term_1], Original ATen: [aten.sum, aten.pow, aten.mul, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing 
import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from sklearn.metrics import * import torch.onnx import torch as torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + x2, tmp68, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf1, class FMNew(nn.Module): """Factorization Machine models pairwise (order-2) feature interactions without linear term and bias. Input shape - 3D tensor with shape: ``(batch_size,field_size,embedding_size)``. Output shape - 2D tensor with shape: ``(batch_size, 1)``. 
References - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf) """ def __init__(self): super(FMNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dulvqingyunLT/DeepCTR-Torch
FM
false
10,317
[ "Apache-2.0" ]
0
f40cf08f3469aa471f9ca69e44c5de51180341cc
https://github.com/dulvqingyunLT/DeepCTR-Torch/tree/f40cf08f3469aa471f9ca69e44c5de51180341cc
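The kernel above is the fully unrolled form of the standard FM identity: per embedding dimension, sum_{i<j} x_i * x_j = 0.5 * ((sum_i x_i)^2 - sum_i x_i^2), summed over embedding dimensions afterwards. An eager sketch with the 3D shape the docstring specifies:

import torch

v = torch.rand(4, 4, 4)                          # (batch_size, field_size, embedding_size)
square_of_sum = v.sum(dim=1, keepdim=True) ** 2
sum_of_square = (v * v).sum(dim=1, keepdim=True)
cross_term = 0.5 * (square_of_sum - sum_of_square).sum(dim=2)
print(cross_term.shape)                          # torch.Size([4, 1])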
DeConv2d
import functools import torch from torch import nn from typing import Optional import torch.nn.functional as F class DeConv2d(nn.Conv2d): def __init__(self, in_nc: 'int', out_nc: 'int', kernel_size: 'int', scale: 'int'=2, mode: 'str'='nearest', align_corners: 'Optional[bool]'=None, **kwargs): super().__init__(in_nc, out_nc, kernel_size, **kwargs) self.up = functools.partial(F.interpolate, scale_factor=scale, mode =mode, align_corners=align_corners) def forward(self, x): x = self.up(x) x = super().forward(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_nc': 4, 'out_nc': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import functools from torch import nn from typing import Optional import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 25 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(400)](buf2, primals_3, 400, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class DeConv2dNew(nn.Conv2d): def __init__(self, in_nc: 'int', out_nc: 'int', kernel_size: 'int', scale: 'int'=2, mode: 'str'='nearest', align_corners: 'Optional[bool]'=None, **kwargs): super().__init__(in_nc, out_nc, kernel_size, **kwargs) self.up = functools.partial(F.interpolate, scale_factor=scale, mode =mode, align_corners=align_corners) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
pomelyu/ML_HW
DeConv2d
false
10,711
[ "MIT" ]
0
b87697f3ee86592a34d80c8dbf167a5767731630
https://github.com/pomelyu/ML_HW/tree/b87697f3ee86592a34d80c8dbf167a5767731630
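An eager sketch of the pipeline call() implements: nearest-neighbour 2x upsample (the _unsafe_index kernel), then a standard convolution (extern kernel) with a fused bias add.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
up = F.interpolate(x, scale_factor=2, mode='nearest')   # -> (4, 4, 8, 8)
conv = torch.nn.Conv2d(4, 4, kernel_size=4)             # in_nc=4, out_nc=4, kernel_size=4
y = conv(up)
print(y.shape)                                          # torch.Size([4, 4, 5, 5]), as asserted in call()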
RDivFloat
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/co/ccog5rgtzipydkyy2hkzhhuz3xrmtwbjws5ymi42pi4qrtmk6eqe.py # Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.reciprocal, aten.mul] # Source node to ATen node mapping: # truediv => mul, reciprocal # Graph fragment: # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 100.0), kwargs = {}) triton_poi_fused_mul_reciprocal_0 = async_compile.triton('triton_poi_fused_mul_reciprocal_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 
tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 100.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.reciprocal, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_reciprocal_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 100.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RDivFloatNew(torch.nn.Module): def __init__(self): super(RDivFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Ilyabasharov/torch2trt
RDivFloat
false
2,539
[ "MIT" ]
0
76bf298b3da408509665e23e2494922b131afb10
https://github.com/Ilyabasharov/torch2trt/tree/76bf298b3da408509665e23e2494922b131afb10
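The module computes 100.0 / x; Inductor lowers the scalar-over-tensor division to a reciprocal followed by a multiply, as the ATen graph fragment above records. A quick equivalence check:

import torch

x = torch.rand(4, 4, 4, 4) + 0.5                 # keep values away from zero
y_ref = 100.0 / x                                # the traced truediv
y_lowered = (1.0 / x) * 100.0                    # reciprocal * 100, as in the kernel
print(torch.allclose(y_ref, y_lowered))          # True (up to fp32 rounding)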
PCC
import torch import torch.nn.functional class PCC(torch.nn.Module): def __init__(self): super(PCC, self).__init__() def pcc(self, y_true, y_pred): A_bar = torch.mean(y_pred, dim=[1, 2, 3, 4], keepdim=True) B_bar = torch.mean(y_true, dim=[1, 2, 3, 4], keepdim=True) top = torch.mean((y_pred - A_bar) * (y_true - B_bar), dim=[1, 2, 3, 4], keepdim=True) bottom = torch.sqrt(torch.mean((y_pred - A_bar) ** 2, dim=[1, 2, 3, 4], keepdim=True) * torch.mean((y_true - B_bar) ** 2, dim=[1, 2, 3, 4], keepdim=True)) return torch.mean(top / bottom) def forward(self, I, J): return 1 - self.pcc(I, J) def get_inputs(): return [torch.rand([4, 4, 4, 4, 4]), torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_pow_sub_0(in_ptr0, in_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp4 = tl.load(in_ptr1 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp3 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp7 / tmp8 tmp12 = tmp4 - tmp11 tmp13 = tmp10 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = tmp10 * tmp10 tmp18 = tl.broadcast_to(tmp17, [RBLOCK]) tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0)) tmp21 = tmp12 * tmp12 tmp22 = tl.broadcast_to(tmp21, [RBLOCK]) tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tl.store(out_ptr2 + x0, tmp16, None) tl.store(out_ptr3 + x0, tmp20, None) tl.store(out_ptr4 + x0, tmp24, None) @triton.jit def triton_per_fused_div_mean_mul_pow_rsub_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp5 = tl.load(in_ptr2 + r0, None) tmp1 = 256.0 tmp2 = tmp0 / tmp1 tmp4 = tmp3 / tmp1 tmp6 = tmp5 / tmp1 tmp7 = tmp4 * tmp6 tmp8 = libdevice.sqrt(tmp7) tmp9 = tmp2 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1.0 tmp16 = tmp15 - tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 1, 1, 1, 1), (1, 4, 4, 4, 4), torch. float32) buf3 = empty_strided_cuda((4, 1, 1, 1, 1), (1, 4, 4, 4, 4), torch. float32) buf4 = empty_strided_cuda((4, 1, 1, 1, 1), (1, 4, 4, 4, 4), torch. 
float32) get_raw_stream(0) triton_per_fused_mean_mul_pow_sub_0[grid(4)](arg0_1, arg1_1, buf2, buf3, buf4, 4, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 triton_per_fused_div_mean_mul_pow_rsub_sqrt_sub_1[grid(1)](buf6, buf2, buf3, buf4, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf2 del buf3 del buf4 return buf6, class PCCNew(torch.nn.Module): def __init__(self): super(PCCNew, self).__init__() def pcc(self, y_true, y_pred): A_bar = torch.mean(y_pred, dim=[1, 2, 3, 4], keepdim=True) B_bar = torch.mean(y_true, dim=[1, 2, 3, 4], keepdim=True) top = torch.mean((y_pred - A_bar) * (y_true - B_bar), dim=[1, 2, 3, 4], keepdim=True) bottom = torch.sqrt(torch.mean((y_pred - A_bar) ** 2, dim=[1, 2, 3, 4], keepdim=True) * torch.mean((y_true - B_bar) ** 2, dim=[1, 2, 3, 4], keepdim=True)) return torch.mean(top / bottom) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
junyuchen245/TransMorph_Transformer_for_Medical_Image_Registration
PCC
false
15,760
[ "MIT" ]
82
dfa24a47a564a000aa9b4eea95a6e83a24568359
https://github.com/junyuchen245/TransMorph_Transformer_for_Medical_Image_Registration/tree/dfa24a47a564a000aa9b4eea95a6e83a24568359
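For reference, the eager form of the Pearson-correlation loss that the two kernels split between a per-sample reduction (means, covariance, variances) and a final combine; this mirrors the pcc() method in the entry above.

import torch

a = torch.rand(4, 4, 4, 4, 4)
b = torch.rand(4, 4, 4, 4, 4)
dims = [1, 2, 3, 4]
am = a.mean(dim=dims, keepdim=True)
bm = b.mean(dim=dims, keepdim=True)
top = ((a - am) * (b - bm)).mean(dim=dims, keepdim=True)
bottom = torch.sqrt(((a - am) ** 2).mean(dim=dims, keepdim=True)
                    * ((b - bm) ** 2).mean(dim=dims, keepdim=True))
loss = 1 - (top / bottom).mean()                 # forward() returns 1 - pcc
print(loss.shape)                                # torch.Size([])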
APPNProp
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/q5/cq5rthx627l3kn64h6pwzbnpc3ofak65jjjfg5pcra7sqcqgjfe5.py # Topologically Sorted Source Nodes: [mul, mul_1, h], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # h => add # mul => mul # mul_1 => mul_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 0.9), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.9 tmp2 = tmp0 * tmp1 tmp4 = 0.1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(arg1_1, arg0_1, out=buf0) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, mul_1, h], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, h, mm_1], Original ATen: [aten.mul, aten.add, aten.mm] extern_kernels.mm(arg1_1, buf1, out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [mul_2, mul_3, h_1], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_0.run(buf3, arg0_1, 16, grid=grid(16), stream=stream0) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul_2, mul_3, h_1, mm_2], Original ATen: [aten.mul, aten.add, aten.mm] extern_kernels.mm(arg1_1, buf3, out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [mul_4, mul_5, h_2], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_0.run(buf5, arg0_1, 16, grid=grid(16), stream=stream0) buf6 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [mul_4, mul_5, h_2, mm_3], Original ATen: [aten.mul, aten.add, aten.mm] extern_kernels.mm(arg1_1, buf5, out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [mul_6, mul_7, h_3], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_0.run(buf7, arg0_1, 16, grid=grid(16), stream=stream0) buf8 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [mul_6, mul_7, h_3, mm_4], Original ATen: [aten.mul, aten.add, aten.mm] extern_kernels.mm(arg1_1, buf7, out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [mul_8, mul_9, h_4], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_0.run(buf9, arg0_1, 16, grid=grid(16), stream=stream0) buf10 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [mul_8, mul_9, h_4, mm_5], Original ATen: [aten.mul, aten.add, aten.mm] extern_kernels.mm(arg1_1, buf9, out=buf10) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [mul_10, mul_11, h_5], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_0.run(buf11, arg0_1, 16, grid=grid(16), stream=stream0) buf12 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [mul_10, mul_11, h_5, mm_6], Original ATen: [aten.mul, aten.add, aten.mm] extern_kernels.mm(arg1_1, buf11, out=buf12) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [mul_12, mul_13, h_6], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_0.run(buf13, arg0_1, 16, grid=grid(16), stream=stream0) buf14 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [mul_12, mul_13, h_6, mm_7], Original ATen: [aten.mul, aten.add, aten.mm] extern_kernels.mm(arg1_1, buf13, out=buf14) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [mul_14, mul_15, h_7], Original ATen: 
[aten.mul, aten.add] triton_poi_fused_add_mul_0.run(buf15, arg0_1, 16, grid=grid(16), stream=stream0) buf16 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [mul_14, mul_15, h_7, mm_8], Original ATen: [aten.mul, aten.add, aten.mm] extern_kernels.mm(arg1_1, buf15, out=buf16) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [mul_16, mul_17, h_8], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_0.run(buf17, arg0_1, 16, grid=grid(16), stream=stream0) buf18 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [mul_16, mul_17, h_8, mm_9], Original ATen: [aten.mul, aten.add, aten.mm] extern_kernels.mm(arg1_1, buf17, out=buf18) del arg1_1 del buf17 buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [mul_18, mul_19, h_9], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_0.run(buf19, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf19, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.9 tmp2 = tmp0 * tmp1 tmp4 = 0.1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg1_1, arg0_1, out=buf0) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_mul_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg1_1, buf1, out=buf2) buf3 = buf2 del buf2 triton_poi_fused_add_mul_0[grid(16)](buf3, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = buf1 del buf1 extern_kernels.mm(arg1_1, buf3, out=buf4) buf5 = buf4 del buf4 triton_poi_fused_add_mul_0[grid(16)](buf5, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf3 del buf3 extern_kernels.mm(arg1_1, buf5, out=buf6) buf7 = buf6 del buf6 triton_poi_fused_add_mul_0[grid(16)](buf7, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = buf5 del buf5 extern_kernels.mm(arg1_1, buf7, out=buf8) buf9 = buf8 del buf8 triton_poi_fused_add_mul_0[grid(16)](buf9, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf10 = buf7 del buf7 extern_kernels.mm(arg1_1, buf9, out=buf10) buf11 = buf10 del buf10 triton_poi_fused_add_mul_0[grid(16)](buf11, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = buf9 del buf9 extern_kernels.mm(arg1_1, buf11, out=buf12) buf13 = buf12 del buf12 triton_poi_fused_add_mul_0[grid(16)](buf13, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = buf11 del buf11 extern_kernels.mm(arg1_1, buf13, out=buf14) buf15 = buf14 del buf14 triton_poi_fused_add_mul_0[grid(16)](buf15, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = buf13 del buf13 extern_kernels.mm(arg1_1, buf15, out=buf16) buf17 = buf16 del buf16 triton_poi_fused_add_mul_0[grid(16)](buf17, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = buf15 del buf15 extern_kernels.mm(arg1_1, buf17, out=buf18) del arg1_1 del buf17 buf19 = buf18 del buf18 triton_poi_fused_add_mul_0[grid(16)](buf19, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf19, class SparseDropout(nn.Module): def __init__(self, p=0.5): super().__init__() self.p = p def forward(self, x): if not self.training: return x x_coal = x.coalesce() drop_val = F.dropout(x_coal._values(), self.p, self.training) return torch.sparse.FloatTensor(x_coal._indices(), drop_val, x.shape) class MixedDropout(nn.Module): def __init__(self, p=0.5): super().__init__() self.dense_dropout = nn.Dropout(p) self.sparse_dropout = SparseDropout(p) def forward(self, x): if x.is_sparse: return self.sparse_dropout(x) else: return self.dense_dropout(x) class 
APPNPropNew(nn.Module): def __init__(self, alpha: 'float'=0.1, K: 'int'=10, dropout: 'float'=0.0): super().__init__() self.alpha = alpha self.K = K if not dropout: self.dropout = lambda x: x else: self.dropout = MixedDropout(dropout) def __repr__(self): return ( f'{self.__class__.__name__}(alpha={self.alpha}, K={self.K}, dropout={self.dropout})' ) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
EdisonLeeeee/Graphgallery
APPNProp
false
5,099
[ "MIT" ]
1
8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
https://github.com/EdisonLeeeee/Graphgallery/tree/8ae9ef57d44f073d0ceaf3f33a3a998546f960a8
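call() above is the K=10, alpha=0.1 APPNP propagation fully unrolled: ten extern mm calls interleaved with the fused 0.9*h + 0.1*x kernel. The equivalent eager loop, using the class defaults:

import torch

x = torch.rand(4, 4)                             # features (arg0_1)
adj = torch.rand(4, 4)                           # propagation matrix (arg1_1)
alpha, K = 0.1, 10
h = x
for _ in range(K):
    h = (1 - alpha) * (adj @ h) + alpha * x      # one mm + triton_poi_fused_add_mul_0 step
print(h.shape)                                   # torch.Size([4, 4]), matching buf19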
C2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/35/c35uoqeuk5xgqj55u5y27rwspn52a5jvnsivhhcescwbt7rm6rnh.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 492032 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 32 x0 = xindex % 3844 x4 = (xindex // 3844) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3872*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/rc/crcvs7gf5dm7znait3krgjw6vt36tx62fnvewm7rx34jlc5n5fqv.py # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_3 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = (xindex // 31) % 31 x2 = (xindex // 961) x5 = xindex x4 = (xindex // 30752) x6 = xindex % 30752 tmp0 = tl.load(in_ptr0 + ((2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5), tmp6, xmask) tl.store(out_ptr1 + (x6 + (30848*x4)), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 32, 62, 62), (123904, 3872, 62, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 492032, grid=grid(492032), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1), torch.int8) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 123008, grid=grid(123008), stream=stream0) return (buf2, primals_1, primals_3, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 16, 64, 64), (65536, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 492032 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 32 x0 = xindex % 3844 x4 = xindex // 3844 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = xindex // 31 % 31 x2 = xindex // 961 x5 = xindex x4 = xindex // 30752 x6 = xindex % 30752 tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 30848 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1)) buf1 = empty_strided_cuda((4, 32, 62, 62), (123904, 3872, 62, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(492032)](buf0, primals_2, buf1, 492032, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 31, 31), (30848, 961, 31, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(123008)](buf1, buf2, buf3, 123008, XBLOCK=512, num_warps=8, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class C2New(nn.Module): def __init__(self) ->None: super(C2New, self).__init__() self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(16, 32, kernel_size=(3, 3), 
bias=True)), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2))])) def forward(self, input_0): primals_1 = self.c2.c2.weight primals_2 = self.c2.c2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
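# Minimal usage sketch (an illustrative assumption, not part of the source
# repo): exercises C2New end to end on the shapes hard-coded in call().
# Conv2d(16, 32, 3) maps 64x64 -> 62x62 and the 2x2/stride-2 max-pool halves
# that to 31x31, so the output should be (4, 32, 31, 31). Requires CUDA.
if __name__ == '__main__':
    model = C2New().cuda()
    x = torch.randn(4, 16, 64, 64, device='cuda')
    y = model(x)
    assert y.shape == (4, 32, 31, 31)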
devillove084/DeepSignal
C2
false
12258
[ "MIT" ]
0
1fe122b32752b11e10ca4bef3d07ddd7de4348b5
https://github.com/devillove084/DeepSignal/tree/1fe122b32752b11e10ca4bef3d07ddd7de4348b5
RMulInt
import torch class RMulInt(torch.nn.Module): def __init__(self): super(RMulInt, self).__init__() def forward(self, x): return 10 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 10.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RMulIntNew(torch.nn.Module): def __init__(self): super(RMulIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
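# Minimal usage sketch (illustrative, not from the source repo): the fused
# kernel should reproduce eager-mode 10 * x exactly, since both evaluate the
# same fp32 multiply elementwise. Requires a CUDA device.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = RMulIntNew()(x)
    assert torch.allclose(y, 10 * x)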
ahangchen/torch2trt
RMulInt
false
6096
[ "MIT" ]
1
53c663f0e0570ef7ffd6771354ae3478f63bd328
https://github.com/ahangchen/torch2trt/tree/53c663f0e0570ef7ffd6771354ae3478f63bd328
IoUnionLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/y7/cy7crqil6ezae27spgmu3xevtvub3gaatpgfs2rnlxr6gaxvjnjz.py # Topologically Sorted Source Nodes: [mul, exp, add, fake, sum_2, sum_3, add_2, union, mul_1, sum_1, intersection, sub, iou, loss], Original ATen: [aten.mul, aten.exp, aten.add, aten.reciprocal, aten.sum, aten.sub, aten.div, aten.rsub] # Source node to ATen node mapping: # add => add # add_2 => add_2 # exp => exp # fake => mul_1, reciprocal # intersection => add_1 # iou => div # loss => sub_1 # mul => mul # mul_1 => mul_2 # sub => sub # sum_1 => sum_1 # sum_2 => sum_2 # sum_3 => sum_3 # union => add_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -10), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1.0), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg1_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %arg1_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %sub), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {}) 
triton_per_fused_add_div_exp_mul_reciprocal_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_exp_mul_reciprocal_rsub_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_mul_reciprocal_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_exp_mul_reciprocal_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp12 = tl.load(in_ptr1 + (r0), None) tmp1 = -10.0 tmp2 = tmp0 * tmp1 tmp3 = tl_math.exp(tmp2) tmp4 = 1.0 tmp5 = tmp3 + tmp4 tmp6 = tl.full([1], 1, tl.int32) tmp7 = tmp6 / tmp5 tmp8 = tmp7 * tmp4 tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tmp8 * tmp12 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = tmp19 + tmp4 tmp21 = tmp11 + tmp15 tmp22 = tmp21 + tmp4 tmp23 = tmp22 - tmp20 tmp24 = tmp20 / tmp23 tmp25 = tmp4 - tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp25, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, exp, add, fake, sum_2, sum_3, add_2, union, mul_1, sum_1, intersection, sub, iou, loss], Original ATen: [aten.mul, aten.exp, aten.add, aten.reciprocal, aten.sum, aten.sub, aten.div, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_add_div_exp_mul_reciprocal_rsub_sub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), 
stream=stream0) del arg0_1 del arg1_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_exp_mul_reciprocal_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp12 = tl.load(in_ptr1 + r0, None) tmp1 = -10.0 tmp2 = tmp0 * tmp1 tmp3 = tl_math.exp(tmp2) tmp4 = 1.0 tmp5 = tmp3 + tmp4 tmp6 = tl.full([1], 1, tl.int32) tmp7 = tmp6 / tmp5 tmp8 = tmp7 * tmp4 tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tmp8 * tmp12 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = tmp19 + tmp4 tmp21 = tmp11 + tmp15 tmp22 = tmp21 + tmp4 tmp23 = tmp22 - tmp20 tmp24 = tmp20 / tmp23 tmp25 = tmp4 - tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_exp_mul_reciprocal_rsub_sub_sum_0[grid(1)]( buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class IoUnionLossNew(nn.Module): """ Intersection over Union loss function Args: alpha (default: int=10): Coefficient in the exponent of the sigmoid function smooth (default: int=1): To prevent a zero in the numerator """ def __init__(self, alpha=10, smooth=1): super().__init__() self.alpha = alpha self.smooth = smooth def sigmoid(self, x): return 1.0 / (1.0 + torch.exp(-self.alpha * x)) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
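# Minimal usage sketch (illustrative, not from the source repo). Note that the
# fused kernel bakes in alpha=10 and smooth=1 (the -10.0 and 1.0 constants
# above), so other values passed to the constructor never reach this compiled
# path. Requires a CUDA device.
if __name__ == '__main__':
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    loss = IoUnionLossNew()(pred, target)
    print(loss.item())  # 0-dim tensor: 1 - IoU of sigmoid(pred) vs. target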
akanametov/pathgan
IoUnionLoss
false
18,300
[ "MIT" ]
8
d93464a9c2490532afdf7bbc0f60decdf2d0767d
https://github.com/akanametov/pathgan/tree/d93464a9c2490532afdf7bbc0f60decdf2d0767d
AddAndNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gi/cgi45knaonkz7f2ymhhwdszxotrkx3bl6ehavmfo5zp3bzzio3zk.py # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_0 = async_compile.triton('triton_poi_fused_add_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nh/cnhbjzi35eshrjhk7tjj644pcpxhgyi7ajxmdjzgjtdqbnbek7qt.py # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => add_1, add_2, mul, mul_1, rsqrt, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {}) triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp9, xmask) tl.store(out_ptr1 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_add_native_layer_norm_0.run(primals_1, primals_2, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_1.run(primals_1, primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_1 del primals_2 del primals_3 del primals_4 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp9, xmask) tl.store(out_ptr1 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_native_layer_norm_0[grid(64)](primals_1, primals_2, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(256)](primals_1, primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 del primals_3 del primals_4 return buf3, buf2 class AddAndNormNew(nn.Module): def __init__(self, d_model): super(AddAndNormNew, self).__init__() self.layer_norm = nn.LayerNorm(d_model) def forward(self, input_0, input_1): primals_3 = self.layer_norm.weight primals_4 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
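# Minimal usage sketch (illustrative, not from the source repo): the fused
# path should match eager residual-add followed by LayerNorm over the last
# dimension (both use eps=1e-05). Requires a CUDA device.
if __name__ == '__main__':
    m = AddAndNormNew(4).cuda()
    a = torch.randn(4, 4, 4, 4, device='cuda')
    b = torch.randn(4, 4, 4, 4, device='cuda')
    ref = torch.nn.functional.layer_norm(a + b, (4,), m.layer_norm.weight,
        m.layer_norm.bias)
    assert torch.allclose(m(a, b), ref, atol=1e-4)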
francismontalbo/attention-is-all-you-need-paper
AddAndNorm
false
15365
[ "MIT" ]
167
21ba3e48917da0c6808126d183bece6a9969cfd2
https://github.com/francismontalbo/attention-is-all-you-need-paper/tree/21ba3e48917da0c6808126d183bece6a9969cfd2
EqualLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_2, buf1, 4, grid=grid(4), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinearNew(nn.Module): def __init__(self, in_dim, out_dim, bias=True, 
bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
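# Minimal usage sketch (illustrative, not from the source repo). Two caveats:
# the compiled call() only accepts in_dim=out_dim=4 with lr_mul=1 (the 0.5
# weight scale and 1.0 bias scale are baked into the kernels above), and the
# FusedLeakyReLU helper classes reference a `fused` CUDA extension that is
# never imported in this record, so they would raise NameError if invoked.
# The plain linear path exercised below does not touch them. Requires CUDA.
if __name__ == '__main__':
    lin = EqualLinearNew(4, 4).cuda()
    x = torch.randn(4, 4, 4, 4, device='cuda')
    ref = torch.nn.functional.linear(x, lin.weight * lin.scale,
        lin.bias * lin.lr_mul)
    assert torch.allclose(lin(x), ref, atol=1e-4)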
ArashVahabpour/encoder4editing-contrastive
EqualLinear
false
13274
[ "MIT" ]
1051
1b91afe1693e01a41118e1ce2451b7d14bec51f4
https://github.com/ArashVahabpour/encoder4editing-contrastive/tree/1b91afe1693e01a41118e1ce2451b7d14bec51f4
minibatch_std_concat_layer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/bc/cbcebs6veq3jcchg2s5hkpwbnyxvsyj4d2tmg22tn336tutpb46n.py # Topologically Sorted Source Nodes: [mean, sub, pow_1, mean_1, add, vals, vals_1], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt] # Source node to ATen node mapping: # add => add # mean => mean # mean_1 => mean_1 # pow_1 => pow_1 # sub => sub # vals => sqrt # vals_1 => mean_2 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sqrt, [1], True), kwargs = {}) triton_poi_fused_add_mean_pow_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_mean_pow_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_pow_sqrt_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp24 = tl.load(in_ptr0 + (16 + x0), xmask) tmp25 = tl.load(in_ptr0 + (80 + x0), xmask) tmp27 = tl.load(in_ptr0 + (144 + x0), xmask) tmp29 = tl.load(in_ptr0 + (208 + x0), xmask) tmp47 = tl.load(in_ptr0 + (32 + x0), xmask) tmp48 = tl.load(in_ptr0 + (96 + x0), xmask) tmp50 = tl.load(in_ptr0 + (160 + x0), xmask) tmp52 = tl.load(in_ptr0 + (224 + x0), xmask) tmp70 = tl.load(in_ptr0 + (48 + x0), xmask) tmp71 = tl.load(in_ptr0 + (112 + x0), xmask) tmp73 = tl.load(in_ptr0 + (176 + x0), xmask) tmp75 = tl.load(in_ptr0 + (240 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp31 = tmp30 / tmp7 tmp32 = tmp24 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp25 - tmp31 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp31 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp29 - tmp31 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp42 / tmp7 tmp44 = tmp43 + tmp21 tmp45 = libdevice.sqrt(tmp44) tmp46 = tmp23 + tmp45 tmp49 = tmp47 + tmp48 tmp51 = tmp49 + tmp50 tmp53 = tmp51 + tmp52 tmp54 = tmp53 / tmp7 tmp55 = tmp47 - tmp54 tmp56 = tmp55 * tmp55 tmp57 = tmp48 - tmp54 tmp58 = tmp57 * tmp57 tmp59 = tmp56 + tmp58 tmp60 = tmp50 - tmp54 tmp61 = tmp60 * tmp60 tmp62 = tmp59 + tmp61 tmp63 = tmp52 - tmp54 tmp64 = tmp63 * tmp63 tmp65 = tmp62 + tmp64 tmp66 = tmp65 / tmp7 tmp67 = tmp66 + tmp21 tmp68 = libdevice.sqrt(tmp67) tmp69 = tmp46 + tmp68 tmp72 = tmp70 + tmp71 tmp74 = tmp72 + tmp73 tmp76 = tmp74 + tmp75 tmp77 = tmp76 / tmp7 tmp78 = tmp70 - tmp77 tmp79 = tmp78 * tmp78 tmp80 = tmp71 - tmp77 tmp81 = tmp80 * tmp80 tmp82 = tmp79 + tmp81 tmp83 = tmp73 - tmp77 tmp84 = tmp83 * tmp83 tmp85 = tmp82 + tmp84 tmp86 = tmp75 - tmp77 tmp87 = tmp86 * tmp86 tmp88 = tmp85 + tmp87 tmp89 = tmp88 / tmp7 tmp90 = tmp89 + tmp21 tmp91 = libdevice.sqrt(tmp90) tmp92 = tmp69 + tmp91 tmp93 = tmp92 / tmp7 tl.store(out_ptr0 + (x0), tmp93, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/fc/cfcpnsijfdjlkqlxyjsxpa4wd2v5qtfunr2guhqosjmgxykzzohf.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %expand], 1), kwargs = {}) triton_poi_fused_cat_1 = 
async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 5 x0 = xindex % 16 x2 = (xindex // 80) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, sub, pow_1, mean_1, add, vals, vals_1], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_pow_sqrt_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(arg0_1, buf0, buf1, 320, grid=grid(320), stream=stream0) del arg0_1 del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_pow_sqrt_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp24 = tl.load(in_ptr0 + (16 + x0), xmask) tmp25 = tl.load(in_ptr0 + (80 + x0), xmask) tmp27 = tl.load(in_ptr0 + (144 + x0), xmask) tmp29 = tl.load(in_ptr0 + (208 + x0), xmask) tmp47 = tl.load(in_ptr0 + (32 + x0), xmask) tmp48 = tl.load(in_ptr0 + (96 + x0), xmask) tmp50 = tl.load(in_ptr0 + (160 + x0), xmask) tmp52 = tl.load(in_ptr0 + (224 + x0), xmask) tmp70 = tl.load(in_ptr0 + (48 + x0), xmask) tmp71 = tl.load(in_ptr0 + (112 + x0), xmask) tmp73 = tl.load(in_ptr0 + (176 + x0), xmask) tmp75 = tl.load(in_ptr0 + (240 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp31 = tmp30 / tmp7 tmp32 = tmp24 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp25 - tmp31 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp31 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp29 - tmp31 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp42 / tmp7 tmp44 = tmp43 + tmp21 tmp45 = libdevice.sqrt(tmp44) tmp46 = tmp23 + tmp45 tmp49 = tmp47 + tmp48 tmp51 = tmp49 + tmp50 tmp53 = tmp51 + tmp52 tmp54 = tmp53 / tmp7 tmp55 = tmp47 - tmp54 tmp56 = tmp55 * tmp55 tmp57 = tmp48 - tmp54 tmp58 = tmp57 * tmp57 tmp59 = tmp56 + tmp58 tmp60 = tmp50 - tmp54 tmp61 = tmp60 * tmp60 tmp62 = tmp59 + tmp61 tmp63 = tmp52 - tmp54 tmp64 = tmp63 * tmp63 tmp65 = tmp62 + tmp64 tmp66 = tmp65 / tmp7 tmp67 = tmp66 + tmp21 tmp68 = libdevice.sqrt(tmp67) tmp69 = tmp46 + tmp68 tmp72 = tmp70 + tmp71 tmp74 = tmp72 + tmp73 tmp76 = tmp74 + tmp75 tmp77 = tmp76 / tmp7 tmp78 = tmp70 - tmp77 tmp79 = tmp78 * tmp78 tmp80 = tmp71 - tmp77 tmp81 = tmp80 * tmp80 tmp82 = tmp79 + tmp81 tmp83 = tmp73 - tmp77 tmp84 = tmp83 * tmp83 tmp85 = tmp82 + tmp84 tmp86 = tmp75 - tmp77 tmp87 = tmp86 * tmp86 tmp88 = tmp85 + tmp87 tmp89 = tmp88 / tmp7 tmp90 = tmp89 + tmp21 tmp91 = libdevice.sqrt(tmp90) tmp92 = tmp69 + tmp91 tmp93 = tmp92 / tmp7 tl.store(out_ptr0 + x0, tmp93, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr):
    xnumel = 320
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 5
    x0 = xindex % 16
    x2 = xindex // 80
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 5, tl.int64)
    tmp9 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mean_pow_sqrt_sub_0[grid(16)](arg0_1, buf0, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
        triton_poi_fused_cat_1[grid(320)](arg0_1, buf0, buf1, 320, XBLOCK=
            256, num_warps=4, num_stages=1)
        del arg0_1
        del buf0
    return buf1,


class minibatch_std_concat_layerNew(nn.Module):

    def __init__(self, averaging='all'):
        super(minibatch_std_concat_layerNew, self).__init__()
        self.averaging = averaging.lower()
        if 'group' in self.averaging:
            self.n = int(self.averaging[5:])
        else:
            # Note: the message needs a %s placeholder, otherwise the
            # %-formatting itself raises TypeError when the assert fires.
            assert self.averaging in ['all', 'flat', 'spatial', 'none',
                'gpool'], 'Invalid averaging mode: %s' % self.averaging
        self.adjusted_std = lambda x, **kwargs: torch.sqrt(torch.mean((x -
            torch.mean(x, **kwargs)) ** 2, **kwargs) + 1e-08)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
achrefjarray/ESRGANplus-master
minibatch_std_concat_layer
false
1,367
[ "Apache-2.0" ]
0
ba470ec5c565a6dc8b48575b1e185ef6b796aec6
https://github.com/achrefjarray/ESRGANplus-master/tree/ba470ec5c565a6dc8b48575b1e185ef6b796aec6
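A minimal eager-mode sketch of what the two fused kernels in this record compute, assuming a CUDA device and the default averaging='all'; it reuses the minibatch_std_concat_layerNew class defined above.

# Sketch: per spatial position, a biased (divide-by-N) std over the batch is
# taken for each channel (with the 1e-08 epsilon inside the sqrt), averaged
# over channels, then broadcast and appended as a fifth channel.
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
std = torch.sqrt(((x - x.mean(0, keepdim=True)) ** 2).mean(0, keepdim=True) + 1e-08)
vals = std.mean(dim=1, keepdim=True)                    # (1, 1, 4, 4)
expected = torch.cat([x, vals.expand(4, -1, -1, -1)], dim=1)
out = minibatch_std_concat_layerNew()(x)
assert out.shape == (4, 5, 4, 4) and torch.allclose(out, expected, atol=1e-6)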
SConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/mr/cmrzofxtfa5fe3ax4o3n5qvgpvhbgcrspjauzarmp4t443npav4h.py # Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul] # Source node to ATen node mapping: # weight => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.1767766952966369), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.1767766952966369 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %mul, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 return (buf2, buf0, primals_3, buf0, ) def benchmark_compiled_module(times=10, 
repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.1767766952966369 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf2, buf0, primals_3, buf0 def quick_scale(module, name='weight'): ScaleW.apply(module, name) return module class ScaleW: """ Constructor: name - name of attribute to be scaled """ def __init__(self, name): self.name = name def scale(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * math.sqrt(2 / fan_in) @staticmethod def apply(module, name): """ Apply runtime scaling to specific module """ hook = ScaleW(name) weight = getattr(module, name) module.register_parameter(name + '_orig', nn.Parameter(weight.data)) del module._parameters[name] module.register_forward_pre_hook(hook) def __call__(self, module, whatever): weight = self.scale(module) setattr(module, self.name, weight) class SConv2dNew(nn.Module): def __init__(self, *args, **kwargs): super().__init__() conv = nn.Conv2d(*args, **kwargs) conv.weight.data.normal_() conv.bias.data.zero_() self.conv = quick_scale(conv) def forward(self, input_0): primals_2 = self.conv.bias primals_1 = self.conv.weight_orig primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
sergkuzn148/stg
SConv2d
false
16,383
[ "MIT" ]
96
84d9f53ae3665c423836a4d0176dc3b22de62b19
https://github.com/sergkuzn148/stg/tree/84d9f53ae3665c423836a4d0176dc3b22de62b19
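The scalar 0.1767766952966369 baked into triton_poi_fused_mul_0 is ScaleW's equalized-learning-rate factor sqrt(2 / fan_in) for this (4, 4, 4, 4) weight; a quick illustrative check (not part of the record):

# fan_in = weight.size(1) * weight[0][0].numel() = in_channels * kh * kw = 64
import math

scale = math.sqrt(2 / (4 * 16))
assert abs(scale - 0.1767766952966369) < 1e-15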
GELU
import math import torch from torch import nn class GELU(nn.Module): def __init__(self): super(GELU, self).__init__() def forward(self, tensor): geluPow = tensor + 0.044715 * torch.pow(tensor, 3) geluTanh = torch.tanh(math.sqrt(2 / math.pi) * geluPow) geluResult = 1 + geluTanh return 0.5 * tensor * geluResult def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class GELUNew(nn.Module): def __init__(self): super(GELUNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
simonepreite/QABERT
GELU
false
4,334
[ "MIT" ]
0
ed3e49f6619f3ff660068291231909693cb8f5d5
https://github.com/simonepreite/QABERT/tree/ed3e49f6619f3ff660068291231909693cb8f5d5
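The constant 0.7978845608028654 in the fused kernel is sqrt(2/pi) from the tanh approximation of GELU, so the compiled module should match PyTorch's built-in approximate GELU. A sketch, assuming a CUDA device and torch >= 1.12 (for the approximate= argument):

# Sketch: kernel computes 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))).
import math
import torch
import torch.nn.functional as F

assert abs(math.sqrt(2 / math.pi) - 0.7978845608028654) < 1e-15
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(GELUNew()(x), F.gelu(x, approximate='tanh'), atol=1e-6)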
BasicModel6_MultiTensor
import torch import torch.nn as nn import torch.nn.functional as F class BasicModel6_MultiTensor(nn.Module): def __init__(self) ->None: super().__init__() def forward(self, input1, input2): input = input1 + input2 return 1 - F.relu(1 - input)[:, 1] def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp2 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp3 - tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_rsub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class BasicModel6_MultiTensorNew(nn.Module): def __init__(self) ->None: super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
LMdeLiangMi/captum
BasicModel6_MultiTensor
false
5,478
[ "BSD-3-Clause" ]
1
8bd9686013fe0ba8996e9b1cbeb0ea8e91512787
https://github.com/LMdeLiangMi/captum/tree/8bd9686013fe0ba8996e9b1cbeb0ea8e91512787
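Note how the [:, 1] slice is folded into the kernel: every load starts at offset 16 + ..., so only channel 1 of a + b is ever materialized and the output is (4, 4, 4) rather than (4, 4, 4, 4). An equivalence sketch, assuming a CUDA device:

# Sketch: fused kernel computes 1 - relu(1 - (a + b)) for channel 1 only.
import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
out = BasicModel6_MultiTensorNew()(a, b)
ref = 1 - torch.relu(1 - (a + b))[:, 1]
assert out.shape == (4, 4, 4) and torch.allclose(out, ref)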
DQN_hot5
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data


class DQN_hot5(nn.Module):
    """
    An MLP for DQN learning.

    Note: uses a one-hot board representation.

    Params
    ------
    m, n: int
        Board size
    num_actions: int
        Number of action-values to output, in one-to-one
        correspondence with the actions in the game.
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot5, self).__init__()
        self.fc1 = nn.Linear(m * n, 1000)
        self.fc2 = nn.Linear(1000, 2000)
        self.fc3 = nn.Linear(2000, num_actions)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'m': 4, 'n': 4, 'num_actions': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 4000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 1000
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 8000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2000
    x1 = xindex // 2000
    tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (1000, 16), (16, 1))
    assert_size_stride(primals_3, (1000,), (1,))
    assert_size_stride(primals_4, (2000, 1000), (1000, 1))
    assert_size_stride(primals_5, (2000,), (1,))
    assert_size_stride(primals_6, (4, 2000), (2000, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
            ), reinterpret_tensor(primals_2, (16, 1000), (1, 16), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(4000)](buf1, primals_3, 4000, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1000, 2000),
            (1, 1000), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(8000)](buf3, primals_5, 8000, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (2000, 4), (1, 2000), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    return buf4, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
        ), buf1, buf3, primals_6, primals_4


class DQN_hot5New(nn.Module):
    """
    An MLP for DQN learning.

    Note: uses a one-hot board representation.

    Params
    ------
    m, n: int
        Board size
    num_actions: int
        Number of action-values to output, in one-to-one
        correspondence with the actions in the game.
""" def __init__(self, m, n, num_actions): super(DQN_hot5New, self).__init__() self.fc1 = nn.Linear(m * n, 1000) self.fc2 = nn.Linear(1000, 2000) self.fc3 = nn.Linear(2000, num_actions) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
CoAxLab/azad
DQN_hot5
false
17,188
[ "MIT" ]
6
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
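Two details worth noting in the generated call(): the bias-add + ReLU pairs are fused into the two pointwise kernels, and buf2 is allocated with a row stride of 2016 rather than 2000 (63 * 32, which reads like compiler alignment padding; that interpretation is an assumption, not stated in the record). A quick usage sketch, assuming a CUDA device:

# Sketch: exercising the compiled MLP end to end.
import torch

net = DQN_hot5New(m=4, n=4, num_actions=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')   # a batch of 4 boards, 4x4 each
out = net(x)
assert out.shape == (4, 4)               # one action-value per action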
SelfAttn
import torch from torch import nn from torch.nn import functional as F class SelfAttn(nn.Module): """ self-attention with learnable parameters """ def __init__(self, dhid): super().__init__() self.scorer = nn.Linear(dhid, 1) def forward(self, inp): scores = F.softmax(self.scorer(inp), dim=1) cont = scores.transpose(1, 2).bmm(inp).squeeze(1) return cont def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dhid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), primals_3, out=buf4) del buf3 return reinterpret_tensor(buf4, (4, 4), (4, 1), 0), primals_3, buf1 class SelfAttnNew(nn.Module): """ self-attention with learnable parameters """ def __init__(self, dhid): super().__init__() self.scorer = nn.Linear(dhid, 1) def forward(self, input_0): primals_1 = self.scorer.weight primals_2 = self.scorer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jzhanson/alfred
SelfAttn
false
3,792
[ "MIT" ]
0
d5b540e7c9b53d3f70cc2907503935fecff00018
https://github.com/jzhanson/alfred/tree/d5b540e7c9b53d3f70cc2907503935fecff00018
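The two softmax kernels are the usual max-subtracted exp followed by the normalizing division, computed over dim=1 (the sequence axis); the bmm then pools the sequence with those scores. An eager equivalence sketch, assuming a CUDA device:

# Sketch: cont = softmax(scorer(inp), dim=1)^T @ inp, pooled over the sequence.
import torch

attn = SelfAttnNew(dhid=4).cuda()
inp = torch.rand(4, 4, 4, device='cuda')
scores = torch.softmax(attn.scorer(inp), dim=1)      # (batch, seq, 1)
ref = scores.transpose(1, 2).bmm(inp).squeeze(1)     # (batch, dhid)
assert torch.allclose(attn(inp), ref, atol=1e-6)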
GEGLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/em/cemg2y45l6deao7jdm2w6eokhhfga7r2ohhlsue6j5tme4ofmwni.py # Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.gelu, aten.mul] # Source node to ATen node mapping: # g => add, erf, mul, mul_1, mul_2 # x => mul_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view_3), kwargs = {}) triton_poi_fused_gelu_mul_0 = async_compile.triton('triton_poi_fused_gelu_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp9 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.gelu, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_gelu_mul_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp9 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tmp10 = tmp8 * tmp9
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_gelu_mul_0[grid(256)](buf0, buf1, buf2, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
    return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
        ), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4


class PositionWiseFeedForward(nn.Module):
    """
    title: Position-wise Feed-Forward Network (FFN)
    summary: Documented reusable implementation of the position wise feedforward network.

    # Position-wise Feed-Forward Network (FFN)

    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feedforward network used in transformers.

    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.

    There is an activation at the hidden layer, which is
    usually set to the ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$

    That is, the FFN function is,
    $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
    where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.

    Sometimes the GELU (Gaussian Error Linear Unit) activation is also used
    instead of ReLU.
    $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$

    ### Gated Linear Units

    This is a generic implementation that supports different variants including
    [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:

    * [experiment that uses `labml.configs`](glu_variants/experiment.html)
    * [simpler version from scratch](glu_variants/simple.html)
    """

    def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
        activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
        bias2: 'bool'=True, bias_gate: 'bool'=True):
        """
        * `d_model` is the number of features in a token embedding
        * `d_ff` is the number of features in the hidden layer of the FFN
        * `dropout` is dropout probability for the hidden layer
        * `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should
          have a learnable bias
        * `bias2` specifies whether the second fully connected layer should
          have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate
          should have a learnable bias
        """
        super().__init__()
        self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
        self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
        self.is_gated = is_gated
        if is_gated:
            self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)

    def forward(self, x: 'torch.Tensor'):
        g = self.activation(self.layer1(x))
        if self.is_gated:
            x = g * self.linear_v(x)
        else:
            x = g
        x = self.dropout(x)
        return self.layer2(x)


class GEGLUNew(nn.Module):

    def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
        super().__init__()
        self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.GELU(
            ), True, False, False, False)

    def forward(self, input_0):
        primals_1 = self.ffn.layer1.weight
        primals_3 = self.ffn.layer2.weight
        primals_4 = self.ffn.linear_v.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
robburdon/pytorch_tabular
GEGLU
false
16,332
[ "MIT" ]
560
9bf75f22c6e1b3033ad699713e77c283d55f3555
https://github.com/robburdon/pytorch_tabular/tree/9bf75f22c6e1b3033ad699713e77c283d55f3555
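Since all three Linear layers are created bias-free and the traced graph contains no dropout op, the compiled forward reduces to GEGLU(x) = (GELU(x W1) * (x Wv)) W2 with the exact (erf-based) GELU. An eager sketch, assuming a CUDA device; the weights are shared with the module, so the comparison holds up to float tolerance:

# Sketch: eager GEGLU(x) = (gelu(x @ W1^T) * (x @ Wv^T)) @ W2^T.
import torch
import torch.nn.functional as F

geglu = GEGLUNew(d_model=4, d_ff=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
ffn = geglu.ffn
ref = F.linear(F.gelu(F.linear(x, ffn.layer1.weight)) *
               F.linear(x, ffn.linear_v.weight), ffn.layer2.weight)
assert torch.allclose(geglu(x), ref, atol=1e-6)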
SmoothL1Loss
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta
        )
    return loss


class SmoothL1Loss(nn.Module):

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
        reduction_override=None, **kwargs):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.
            reduction)
        loss_bbox = self.loss_weight * smooth_l1_loss(pred, target, weight,
            beta=self.beta, reduction=reduction, avg_factor=avg_factor, **
            kwargs)
        return loss_bbox


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_div_lt_mean_mul_sub_where_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 1.0
    tmp5 = tmp3 < tmp4
    tmp6 = 0.5
    tmp7 = tmp3 * tmp6
    tmp8 = tmp7 * tmp3
    tmp9 = tmp8 * tmp4
    tmp10 = tmp3 - tmp6
    tmp11 = tl.where(tmp5, tmp9, tmp10)
    tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
    tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
    tmp15 = 256.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp16 * tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_div_lt_mean_mul_sub_where_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.
:Example: >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta ) return loss class SmoothL1LossNew(nn.Module): def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): super(SmoothL1LossNew, self).__init__() self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
AllenPeng0209/SaccadeNet
SmoothL1Loss
false
7,651
[ "Apache-2.0" ]
30
0fce4266cbffc9a2c5f70335efa636da849ce70c
https://github.com/AllenPeng0209/SaccadeNet/tree/0fce4266cbffc9a2c5f70335efa636da849ce70c
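The persistent reduction hardcodes the defaults: tmp15 = 256.0 is the element count of the (4, 4, 4, 4) inputs (reduction='mean') and the final tmp17 = tmp16 * 1.0 is loss_weight, so weight, avg_factor and reduction_override are not representable in this compiled graph. A worked check of the piecewise formula itself, in eager mode (no GPU needed):

# Sketch: smooth L1 with beta=1 is 0.5*d^2 for d < 1 and d - 0.5 otherwise.
# diff = [0.4, 1.0, 2.0] -> [0.5*0.16, 1.0-0.5, 2.0-0.5] = [0.08, 0.5, 1.5].
import torch

pred = torch.tensor([0.0, 2.0, 3.0])
target = torch.tensor([0.4, 1.0, 1.0])
loss = smooth_l1_loss(pred, target, reduction='none')
assert torch.allclose(loss, torch.tensor([0.08, 0.5, 1.5]))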
squeeze
import torch import torch.nn as nn class squeeze(nn.Module): def __init__(self, block_size): super(squeeze, self).__init__() self.block_size = block_size self.block_size_sq = block_size * block_size def inverse(self, input): output = input.permute(0, 2, 3, 1) batch_size, d_height, d_width, d_depth = output.size() s_depth = int(d_depth / self.block_size_sq) s_width = int(d_width * self.block_size) s_height = int(d_height * self.block_size) t_1 = output.contiguous().view(batch_size, d_height, d_width, self. block_size_sq, s_depth) spl = t_1.split(self.block_size, 3) stack = [t_t.contiguous().view(batch_size, d_height, s_width, s_depth) for t_t in spl] output = torch.stack(stack, 0).transpose(0, 1).permute(0, 2, 1, 3, 4 ).contiguous().view(batch_size, s_height, s_width, s_depth) output = output.permute(0, 3, 1, 2) return output.contiguous() def forward(self, input): output = input.permute(0, 2, 3, 1) batch_size, s_height, _s_width, s_depth = output.size() d_depth = s_depth * self.block_size_sq d_height = int(s_height / self.block_size) t_1 = output.split(self.block_size, 2) stack = [t_t.contiguous().view(batch_size, d_height, d_depth) for t_t in t_1] output = torch.stack(stack, 1) output = output.permute(0, 2, 1, 3) output = output.permute(0, 3, 1, 2) return output.contiguous() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'block_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 64, 64), 0), class squeezeNew(nn.Module): def __init__(self, block_size): super(squeezeNew, self).__init__() self.block_size = block_size self.block_size_sq = block_size * block_size def inverse(self, input): output = input.permute(0, 2, 3, 1) batch_size, d_height, d_width, d_depth = output.size() s_depth = int(d_depth / self.block_size_sq) s_width = int(d_width * self.block_size) s_height = int(d_height * self.block_size) t_1 = output.contiguous().view(batch_size, d_height, d_width, self. block_size_sq, s_depth) spl = t_1.split(self.block_size, 3) stack = [t_t.contiguous().view(batch_size, d_height, s_width, s_depth) for t_t in spl] output = torch.stack(stack, 0).transpose(0, 1).permute(0, 2, 1, 3, 4 ).contiguous().view(batch_size, s_height, s_width, s_depth) output = output.permute(0, 3, 1, 2) return output.contiguous() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
lingzenan/invertible-resnet
squeeze
false
7,091
[ "MIT" ]
1
57b1c0de51a885aed074b77628f3b0c85c548e70
https://github.com/lingzenan/invertible-resnet/tree/57b1c0de51a885aed074b77628f3b0c85c548e70
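With block_size=4 on a (4, 4, 4, 4) input, the whole space-to-depth operation collapses to a single permutation, which is why the compiled graph is one clone kernel plus a reinterpret_tensor to (4, 64, 1, 1). The eager inverse() kept on the class should undo it exactly; a round-trip sketch, assuming a CUDA device:

# Sketch: round-trip through the compiled forward and the eager inverse.
# Output channel ordering is c' = 16*h + 4*w + c, matching the clone kernel's
# indexing (this reading of the index math is the editor's derivation).
import torch

sq = squeezeNew(block_size=4)
x = torch.rand(4, 4, 4, 4, device='cuda')
y = sq(x)
assert y.shape == (4, 64, 1, 1)
assert torch.allclose(sq.inverse(y), x)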
mnistmodel_B
import torch from torch import nn import torch.nn.functional as F class mnistmodel_B(nn.Module): def __init__(self): super(mnistmodel_B, self).__init__() self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size= 8, stride=2, padding=3) self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=6, stride=2) self.conv3 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=5, stride=1) self.dense1 = nn.Linear(in_features=128, out_features=128) self.dense2 = nn.Linear(in_features=128, out_features=10) def forward(self, x): x = F.dropout(x, 0.2) x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = x.view(-1, 128) x = F.relu(self.dense1(x)) x = F.dropout(x, 0.5) x = self.dense2(x) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (y0 + 64 * x2 + 65536 * y1), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 512 xnumel = 100 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 128 y1 = yindex // 128 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 12800 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 100 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 128 * x2 + 12800 * y1), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_2, (64, 1, 8, 8), (64, 64, 8, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (128, 64, 6, 6), (2304, 36, 6, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128), (128, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (10, 128), (128, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((128, 64, 6, 6), (2304, 1, 384, 64), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(8192, 36)](primals_4, buf0, 8192, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf1 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128), torch.float32) triton_poi_fused_1[grid(16384, 25)](primals_6, buf1, 16384, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_6 buf2 = torch.ops.aten.native_dropout.default(primals_1, 0.2, True) del primals_1 buf3 = buf2[0] del buf2 buf5 = extern_kernels.convolution(buf3, primals_2, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf6 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) triton_poi_fused_convolution_relu_2[grid(256, 1024)](buf5, primals_3, buf6, 256, 1024, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf5 del primals_3 buf7 = extern_kernels.convolution(buf6, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 14, 14), (25088, 1, 1792, 128)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_3[grid(100352)](buf8, primals_5, 100352, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf8, buf1, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 128, 10, 10), (12800, 1, 1280, 128)) buf10 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1), torch.float32) buf18 = empty_strided_cuda((4, 128, 10, 10), (12800, 1, 1280, 128), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_4[grid(512, 100)]( buf9, primals_7, buf10, buf18, 512, 100, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf9, (400, 128), (128, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (400, 128), (128, 1), 0 ), reinterpret_tensor(primals_8, (128, 128), (1, 128), 0), out= buf11) buf12 = buf11 del buf11 buf17 = empty_strided_cuda((400, 128), (128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(51200)](buf12, primals_9, buf17, 51200, XBLOCK=512, num_warps=4, num_stages=1) del primals_9 buf13 = torch.ops.aten.native_dropout.default(buf12, 0.5, True) del buf12 buf14 = buf13[0] buf15 = buf13[1] del buf13 buf16 = empty_strided_cuda((400, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf14, reinterpret_tensor( primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf16) del primals_11 return buf16, primals_2, buf0, buf1, buf3, buf6, buf8, reinterpret_tensor( buf10, (400, 128), (128, 1), 0 ), buf14, buf15, primals_10, buf17, primals_8, buf18 class mnistmodel_BNew(nn.Module): def __init__(self): super(mnistmodel_BNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size= 8, stride=2, padding=3) self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=6, stride=2) self.conv3 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=5, stride=1) self.dense1 = nn.Linear(in_features=128, out_features=128) self.dense2 = nn.Linear(in_features=128, out_features=10) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.dense1.weight primals_9 = self.dense1.bias primals_10 = self.dense2.weight primals_11 = self.dense2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
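A minimal smoke test for the compiled wrapper, assuming a CUDA device and a torch build matching the one that generated this code (call() allocates CUDA buffers and asserts the exact (4, 1, 64, 64) input layout). Note that both the eager model and this graph apply dropout unconditionally (F.dropout defaults to training=True, and the graph hard-codes native_dropout(..., True)), so outputs are stochastic and only shapes can be checked deterministically:

import torch

model = mnistmodel_BNew().cuda()
out = model(torch.rand(4, 1, 64, 64, device='cuda'))
print(out.shape)  # torch.Size([400, 10])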
layel2/layyer-lib
mnistmodel_B
false
3,877
[ "MIT" ]
0
db48b5c38098ee93d2d34693d98e5ef4d319d919
https://github.com/layel2/layyer-lib/tree/db48b5c38098ee93d2d34693d98e5ef4d319d919
core_network
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/sf/csfkqsknybmr6wr4qghsq2q5fkm44yw64vallsxwzclanv6zrtso.py # Topologically Sorted Source Nodes: [add, h_t], Original ATen: [aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # add => add # h_t => relu # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, 
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = 0.0 tmp10 = tmp8 <= tmp9 tl.store(in_out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add, h_t], Original ATen: [aten.add, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0.run(buf2, primals_2, buf1, primals_5, buf3, 256, grid=grid(256), stream=stream0) del buf1 del primals_2 del primals_5 return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = 0.0 tmp10 = tmp8 <= tmp9 tl.store(in_out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf2, primals_2, buf1, primals_5, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_2 del primals_5 return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf3 class core_networkNew(nn.Module): """ An RNN that maintains an internal state that integrates information extracted from the history of past observations. It encodes the agent's knowledge of the environment through a state vector `h_t` that gets updated at every time step `t`. Concretely, it takes the glimpse representation `g_t` as input, and combines it with its internal state `h_t_prev` at the previous time step, to produce the new internal state `h_t` at the current time step. In other words: `h_t = relu( fc(h_t_prev) + fc(g_t) )` Args ---- - input_size: input size of the rnn. - hidden_size: hidden size of the rnn. - g_t: a 2D tensor of shape (B, hidden_size). The glimpse representation returned by the glimpse network for the current timestep `t`. - h_t_prev: a 2D tensor of shape (B, hidden_size). The hidden state vector for the previous timestep `t-1`. Returns ------- - h_t: a 2D tensor of shape (B, hidden_size). The hidden state vector for the current timestep `t`. 
""" def __init__(self, input_size, hidden_size): super(core_networkNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, hidden_size) self.h2h = nn.Linear(hidden_size, hidden_size) def forward(self, input_0, input_1): primals_1 = self.i2h.weight primals_2 = self.i2h.bias primals_4 = self.h2h.weight primals_5 = self.h2h.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
felixnon/foveated-visual-attention
core_network
false
12,366
[ "MIT" ]
0
7e7d9a5ef24ec42eb76ba72f783bb2227bdb4851
https://github.com/felixnon/foveated-visual-attention/tree/7e7d9a5ef24ec42eb76ba72f783bb2227bdb4851
Autoencoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/sm/csmenfx32ekzp3jy4hjjyfdm37wlwf77smzilpa5revmsd7esfwf.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ox/coxzkq4kueijjjccjhdtvzhfquura4nj5lgusydc343b4la3telx.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view] # Source node to ATen node mapping: # x_2 => view_7 # Graph fragment: # %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {}) triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = 
reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf12, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf4, primals_5, buf11, 256, grid=grid(256), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf7, primals_7, buf10, 256, grid=grid(256), stream=stream0) del primals_7 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = reinterpret_tensor(buf7, (64, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [y], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_9 return (reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, buf8, primals_8, buf10, primals_6, buf11, primals_4, buf12, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, 
primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf4, primals_5, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0) del buf4 extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (4, 4), (1, 4 ), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 buf10 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf7, primals_7, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf7, (64, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_9 return reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, buf5, buf8, primals_8, buf10, primals_6, buf11, primals_4, buf12 class AutoencoderNew(nn.Module): def __init__(self, input_dim, output_dim, n_hid, n_bottleneck): super(AutoencoderNew, self).__init__() self.fc1 = nn.Linear(input_dim, n_hid) self.fc2 = nn.Linear(n_hid, n_bottleneck) self.fc3 = nn.Linear(n_bottleneck, n_hid) self.fc4 = nn.Linear(n_hid, output_dim) self.act = nn.ReLU(inplace=True) def get_features(self, x): return None def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
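Because AutoencoderNew routes everything through call(), the dataflow is easier to see written out eagerly: three relu-activated linears and a linear output, which is what the mm/addmm and fused-relu kernels above implement. A minimal cross-check, assuming CUDA and a compatible torch build:

import torch

ae = AutoencoderNew(4, 4, 4, 4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')

def eager_forward(m, x):
    # Mirrors the compiled graph: fc1 -> relu -> fc2 -> relu -> fc3 -> relu -> fc4.
    h = m.act(m.fc1(x))
    h = m.act(m.fc2(h))
    h = m.act(m.fc3(h))
    return m.fc4(h)

print(torch.allclose(ae(x), eager_forward(ae, x), atol=1e-6))  # expected: True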
JavierAntoran/tiger-costume
Autoencoder
false
17,458
[ "MIT" ]
10
975661dfab2c435281f74c6be86529b16881ebcb
https://github.com/JavierAntoran/tiger-costume/tree/975661dfab2c435281f74c6be86529b16881ebcb
TVLoss
import torch from torch import nn import torch.utils.data class TVLoss(nn.Module): def __init__(self, tv_loss_weight=1): super(TVLoss, self).__init__() self.tv_loss_weight = tv_loss_weight def forward(self, x): batch_size = x.size()[0] h_x = x.size()[2] w_x = x.size()[3] count_h = self.tensor_size(x[:, :, 1:, :]) count_w = self.tensor_size(x[:, :, :, 1:]) h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum() w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum() return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w ) / batch_size @staticmethod def tensor_size(t): return t.size()[1] * t.size()[2] * t.size()[3] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
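The bare literals in the fused kernel of the next cell all fall out of this module once the (4, 4, 4, 4) shape from get_inputs() is fixed; a short derivation, assuming tv_loss_weight stays at its default of 1:

B, C, H, W = 4, 4, 4, 4      # shape from get_inputs()
count_h = C * (H - 1) * W    # tensor_size(x[:, :, 1:, :]) = 48
count_w = C * H * (W - 1)    # tensor_size(x[:, :, :, 1:]) = 48
print(1.0 / count_h)         # 0.020833333333333332, the per-sum scale in the kernel
print(1.0 / B)               # 0.25, the final division by batch_size
# rnumel = 192 in the kernel is B * C * (H - 1) * W (= B * C * H * (W - 1)):
# both squared-difference maps flatten to 192 elements, reduced in one block.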
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex % 12 r1 = rindex // 12 r2 = rindex % 3 r3 = rindex // 3 tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0) tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(rmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = 0.020833333333333332 tmp17 = tmp7 * tmp16 tmp18 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tmp20 = 2.0 tmp21 = tmp19 * tmp20 tmp22 = 0.25 tmp23 = tmp21 * tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class TVLossNew(nn.Module): def __init__(self, tv_loss_weight=1): super(TVLossNew, self).__init__() self.tv_loss_weight = tv_loss_weight @staticmethod def tensor_size(t): return t.size()[1] * t.size()[2] * t.size()[3] def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
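A quick numerical cross-check of the fused reduction against the eager TVLoss above (assumes CUDA). Note the compiled graph bakes in tv_loss_weight=1 and the 4x4x4x4 input shape, so it is only valid under those defaults:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = TVLoss()(x)      # reference implementation from the cell above
fused = TVLossNew()(x)   # single fused Triton reduction
print(torch.allclose(eager, fused, atol=1e-6))  # expected: True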
Prajwal564/SRGAN
TVLoss
false
9,407
[ "MIT" ]
0
198b86b0cec4d68737f26b190e4ab04887be4ac3
https://github.com/Prajwal564/SRGAN/tree/198b86b0cec4d68737f26b190e4ab04887be4ac3
SEBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # x => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/o5/co5kpgkyaabh4nd7yz4gzpyl7x35mwdhgusbruykvtydzlq2lizg.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_1 => convolution # x_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bp/cbp53nzkr6hcnl4m7bbtbj2xgxgdd2wpmbdcnrgjzy6l4el2ypa7.py # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # x_3 => convolution_1 # x_4 => sigmoid # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_sigmoid_2 = async_compile.triton('triton_poi_fused_convolution_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yz/cyzraf5hkq5g34ofp7xcct5u6pj5qtysku2ga222bud7kopwrvo5.py # Topologically Sorted Source Nodes: [x_5, mul], Original ATen: [aten.repeat, aten.mul] # Source node to ATen node mapping: # mul => mul # x_5 => repeat # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%sigmoid, [1, 1, 4, 4]), kwargs = {}) # %mul : 
[num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %repeat), kwargs = {}) triton_poi_fused_mul_repeat_3 = async_compile.triton('triton_poi_fused_mul_repeat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_repeat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_repeat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), 
(4, 1, 1, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_2.run(buf4, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5, mul], Original ATen: [aten.repeat, aten.mul] triton_poi_fused_mul_repeat_3.run(primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0) return (buf5, primals_1, primals_2, primals_4, buf0, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_mul_repeat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + 
x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_sigmoid_2[grid(16)](buf4, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_repeat_3[grid(256)](primals_1, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf5, primals_1, primals_2, primals_4, buf0, buf2, buf4 class SEBlockNew(nn.Module): def __init__(self, input_channels, internal_neurons): super(SEBlockNew, self).__init__() self.down = nn.Conv2d(in_channels=input_channels, out_channels= internal_neurons, kernel_size=1, stride=1, bias=True) self.up = nn.Conv2d(in_channels=internal_neurons, out_channels= input_channels, kernel_size=1, stride=1, bias=True) def forward(self, input_0): primals_2 = self.down.weight primals_3 = self.down.bias primals_4 = self.up.weight primals_5 = self.up.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
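The four kernels above fuse the standard squeeze-and-excitation pattern. A minimal eager sketch of the forward pass that call() implements, reconstructed from the kernel graph (se_forward_eager is a hypothetical helper; for the 4x4 spatial input, avg_pool2d with kernel 4 acts as a global average pool, and a square input is assumed):

import torch
import torch.nn.functional as F

def se_forward_eager(block, inputs):
    # squeeze: global average pool down to (B, C, 1, 1)
    x = F.avg_pool2d(inputs, kernel_size=inputs.size(3))
    # excitation: 1x1 bottleneck conv + relu, then 1x1 expansion conv + sigmoid
    x = F.relu(block.down(x))
    x = torch.sigmoid(block.up(x))
    # broadcast the per-channel gate over the spatial dims and rescale the input
    x = x.repeat(1, 1, inputs.size(2), inputs.size(3))
    return inputs * x

se = SEBlockNew(4, 4).cuda()
inp = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(se(inp), se_forward_eager(se, inp), atol=1e-6))  # True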
sysu-shey/ACNet
SEBlock
false
16523
[ "MIT" ]
767
6d967d3fff2d79a37f85799b78a21ffbd9001bd2
https://github.com/sysu-shey/ACNet/tree/6d967d3fff2d79a37f85799b78a21ffbd9001bd2
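A minimal smoke test for the SEBlockNew record above, sketched under two assumptions: a CUDA device is available and the class definition from the optimised code is in scope. The input shape is not free; the generated call() hard-asserts the traced (4, 4, 4, 4) size and strides.
import torch

# Hypothetical usage of the record above; shapes are pinned by assert_size_stride.
se = SEBlockNew(input_channels=4, internal_neurons=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = se(x)
# The SE block re-scales each channel by a sigmoid gate, so the shape is preserved.
assert y.shape == x.shape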
UpSample
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/qh/cqhya6theq3cyi3hhxcsdbxfd6fgb5momtoovf7apj7ashpshxiz.py # Topologically Sorted Source Nodes: [feat_1], Original ATen: [aten.pixel_shuffle] # Source node to ATen node mapping: # feat_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_pixel_shuffle_0 = async_compile.triton('triton_poi_fused_pixel_shuffle_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 2], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pixel_shuffle_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pixel_shuffle_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 512 xnumel = 2 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x5 = xindex y0 = yindex % 4 y1 = (yindex // 4) % 2 y2 = (yindex // 8) % 4 y6 = (yindex // 32) y3 = (yindex // 32) % 4 y7 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*y2) + (16*x5) + (32*y1) + (64*y6)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + (2*y1) + (4*y3)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x5 + (2*y7)), tmp2, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [feat], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [feat_1], Original ATen: [aten.pixel_shuffle] stream0 = get_raw_stream(0) triton_poi_fused_pixel_shuffle_0.run(buf0, primals_2, buf1, 512, 2, grid=grid(512, 2), stream=stream0) del buf0 del primals_2 return (reinterpret_tensor(buf1, (4, 4, 8, 8), (256, 64, 8, 1), 0), primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pixel_shuffle_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 512 xnumel = 2 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x5 = xindex y0 = yindex % 4 y1 = yindex // 4 % 2 y2 = yindex // 8 % 4 y6 = yindex // 32 y3 = yindex // 32 % 4 y7 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * y2 + 16 * x5 + 32 * y1 + 64 * y6), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 2 * y1 + 4 * y3), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x5 + 2 * y7), tmp2, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1 ), torch.float32) get_raw_stream(0) triton_poi_fused_pixel_shuffle_0[grid(512, 2)](buf0, primals_2, buf1, 512, 2, XBLOCK=2, YBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return reinterpret_tensor(buf1, (4, 4, 8, 8), (256, 64, 8, 1), 0 ), primals_1, primals_3 class UpSampleNew(nn.Module): def __init__(self, n_chan, factor=2): super(UpSampleNew, self).__init__() out_chan = n_chan * factor * factor self.proj = nn.Conv2d(n_chan, out_chan, 1, 1, 0) self.up = nn.PixelShuffle(factor) self.init_weight() def init_weight(self): nn.init.xavier_normal_(self.proj.weight, gain=1.0) def forward(self, input_0): primals_1 = self.proj.weight primals_2 = self.proj.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AlexanderDokuchaev/mmsegmentation
UpSample
false
11185
[ "Apache-2.0" ]
0
0c443ee370cce6227661b802184072174c4e3f64
https://github.com/AlexanderDokuchaev/mmsegmentation/tree/0c443ee370cce6227661b802184072174c4e3f64
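A sketch of how the UpSampleNew record above behaves, assuming CUDA and the class in scope: the 1x1 projection widens channels by factor**2 and PixelShuffle trades them back for spatial resolution, which the returned (4, 4, 8, 8) reinterpret_tensor confirms.
import torch

up = UpSampleNew(n_chan=4, factor=2).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = up(x)
# proj maps 4 -> 16 channels; PixelShuffle(2) folds them into 2x spatial dims.
assert y.shape == (4, 4, 8, 8)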
ChannelAvgPool
import torch import torch.nn as nn import torch.nn.functional as F class ChannelAvgPool(nn.AvgPool1d): def forward(self, input): n, c, w, h = input.size() input = input.view(n, c, w * h).permute(0, 2, 1) pooled = F.avg_pool1d(input, self.kernel_size, self.stride, self. padding, self.ceil_mode, self.count_include_pad) return pooled.permute(0, 2, 1).view(n, 1, w, h) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32 ) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0), class ChannelAvgPoolNew(nn.AvgPool1d): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
joeization/CycleGAN
ChannelAvgPool
false
3752
[ "MIT" ]
0
9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
https://github.com/joeization/CycleGAN/tree/9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
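A hedged check that the fused avg_pool2d kernel in the ChannelAvgPoolNew record matches a plain channel mean (it sums the four channel values and scales by 0.25); assumes CUDA and the class above in scope.
import torch

pool = ChannelAvgPoolNew(kernel_size=4)
x = torch.rand(4, 4, 4, 4, device='cuda')
y = pool(x)
assert y.shape == (4, 1, 4, 4)
# With kernel_size == C and the default stride, the pool reduces to a channel mean.
assert torch.allclose(y, x.mean(dim=1, keepdim=True), atol=1e-6)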
LeNet_300_100
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gi/cgisfp7cjgya6wz3zy6wgsfrei7pmk7oec6olmpydu3ckl4g7flh.py # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/d7/cd7rilnjd42cirsc5dhnnwlficmjz5omrtsdfojgouhplcpynn4n.py # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.relu] # Source node to ATen node mapping: # x2 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fq/cfqeaxpwvkwok3wdzj4x74nonwbnkdl7jv6hborhhhugwqldix3l.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_2 = async_compile.triton('triton_per_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (300, 784), (784, 1)) assert_size_stride(primals_3, (300, ), (1, )) assert_size_stride(primals_4, (100, 300), (300, 1)) assert_size_stride(primals_5, (100, ), (1, )) assert_size_stride(primals_6, (10, 100), (100, 1)) assert_size_stride(primals_7, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 300), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 1200, grid=grid(1200), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically 
Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (300, 100), (1, 300), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_5, 400, grid=grid(400), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (100, 10), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_2.run(buf4, buf7, 4, 10, grid=grid(4), stream=stream0) del buf4 return (buf7, primals_1, buf1, buf3, buf7, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((300, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((100, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((10, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (300, 784), (784, 1)) assert_size_stride(primals_3, (300,), (1,)) assert_size_stride(primals_4, (100, 300), (300, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (10, 100), (100, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 300), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1200)](buf1, primals_3, 1200, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (300, 100), ( 1, 300), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(400)](buf3, primals_5, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32) 
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (100, 10), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_2[grid(4)](buf4, buf7, 4, 10, XBLOCK= 1, num_warps=2, num_stages=1) del buf4 return buf7, primals_1, buf1, buf3, buf7, primals_6, primals_4 class LeNet_300_100New(nn.Module): """Simple NN with hidden layers [300, 100] Based on https://github.com/mi-lad/snip/blob/master/train.py by Milad Alizadeh. """ def __init__(self, save_features=None, bench_model=False): super(LeNet_300_100New, self).__init__() self.fc1 = nn.Linear(28 * 28, 300, bias=True) self.fc2 = nn.Linear(300, 100, bias=True) self.fc3 = nn.Linear(100, 10, bias=True) self.mask = None def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
VITA-Group/SViTE
LeNet_300_100
false
14553
[ "MIT" ]
50
b0c62fd153c8b0b99917ab935ee76925c9de1149
https://github.com/VITA-Group/SViTE/tree/b0c62fd153c8b0b99917ab935ee76925c9de1149
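A small sanity check for the LeNet_300_100New record, assuming CUDA: the wrapper expects flattened 28*28 inputs (call() asserts a (4, 784) size) and the final persistent-reduction kernel emits log-softmax rows.
import torch

model = LeNet_300_100New().cuda()
x = torch.rand(4, 28 * 28, device='cuda')
log_probs = model(x)
assert log_probs.shape == (4, 10)
# Rows are log-probabilities: exponentiating and summing gives ~1 per sample.
assert torch.allclose(log_probs.exp().sum(dim=1), torch.ones(4, device='cuda'), atol=1e-5)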
RewardCriterion
import torch from torch import nn from torch.autograd import * class RewardCriterion(nn.Module): def __init__(self): super(RewardCriterion, self).__init__() def forward(self, input, seq, reward, reduction='mean'): N, L = input.shape[:2] input = input.gather(2, seq.unsqueeze(2)).squeeze(2) input = input.reshape(-1) reward = reward.reshape(-1) mask = seq > 0 mask = torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1 ).reshape(-1) output = -input * reward * mask if reduction == 'none': output = output.view(N, L).sum(1) / mask.view(N, L).sum(1) elif reduction == 'mean': output = torch.sum(output) / torch.sum(mask) return output def get_inputs(): return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4], dtype=torch.int64), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr2 + r0, None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = r0 % 4 tmp12 = tl.full([1, 1], 0, tl.int64) tmp14 = tl.full([1, 1], 1, tl.int64) tmp15 = tmp11 < tmp14 tmp16 = tl.full([1, 1], True, tl.int1) tmp17 = tl.full(tmp16.shape, False, tmp16.dtype) tmp18 = tl.where(tmp15, tmp16, tmp17) tmp19 = tmp11 >= tmp14 tl.full([1, 1], 4, tl.int64) tmp22 = tl.load(in_ptr0 + tl.broadcast_to(4 * (r0 // 4) + (-1 + r0 % 4), [XBLOCK, RBLOCK]), tmp19, eviction_policy='evict_last', other=0.0) tmp23 = tmp22 > tmp12 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp19, tmp23, tmp24) tmp26 = tl.where(tmp15, tmp18, tmp25) tmp27 = tmp26.to(tl.float32) tmp28 = tmp10 * tmp27 tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = tl.sum(tmp29, 1)[:, None] tmp32 = tmp26.to(tl.int64) tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp36 = tmp35.to(tl.float32) tmp37 = tmp31 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class RewardCriterionNew(nn.Module): def __init__(self): super(RewardCriterionNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
GeorgeKostenkov/ImageCaptioning.pytorch
RewardCriterion
false
11442
[ "MIT" ]
0
8f17433fdaba2f89774e45ad5a3a88b880932ee6
https://github.com/GeorgeKostenkov/ImageCaptioning.pytorch/tree/8f17433fdaba2f89774e45ad5a3a88b880932ee6
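A usage sketch for RewardCriterionNew mirroring the record's own get_inputs (CUDA assumed); the int64 'input' tensor is just the harness's stand-in for gathered log-probabilities, and the single fused kernel returns the mean-reduced scalar.
import torch

crit = RewardCriterionNew()
inp = torch.ones(4, 4, 4, device='cuda', dtype=torch.int64)   # (N, L, V) scores
seq = torch.ones(4, 4, device='cuda', dtype=torch.int64)      # sampled token ids
reward = torch.rand(4, 4, device='cuda')                      # per-token rewards
loss = crit(inp, seq, reward)
assert loss.dim() == 0  # 'mean' reduction is baked into the fused kernel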
ResidualLayer
import math import torch from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def swish(x): return x * x.sigmoid() def glorot_orthogonal(tensor, scale): if tensor is not None: torch.nn.init.orthogonal_(tensor.data) scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() tensor.data *= scale.sqrt() class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class ResidualLayer(torch.nn.Module): def __init__(self, hidden_channels, act=swish): super(ResidualLayer, self).__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) self.lin2 = Linear(hidden_channels, hidden_channels) self.reset_parameters() def reset_parameters(self): glorot_orthogonal(self.lin1.weight, scale=2.0) self.lin1.bias.data.fill_(0) glorot_orthogonal(self.lin2.weight, scale=2.0) self.lin2.bias.data.fill_(0) def forward(self, x): return x + self.act(self.lin2(self.act(self.lin1(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_1[grid(256)](primals_2, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def swish(x): return x * x.sigmoid() def glorot_orthogonal(tensor, scale): if tensor is not None: torch.nn.init.orthogonal_(tensor.data) scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() tensor.data *= scale.sqrt() class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % 
groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class ResidualLayerNew(torch.nn.Module): def __init__(self, hidden_channels, act=swish): super(ResidualLayerNew, self).__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) self.lin2 = Linear(hidden_channels, hidden_channels) self.reset_parameters() def reset_parameters(self): glorot_orthogonal(self.lin1.weight, scale=2.0) self.lin1.bias.data.fill_(0) glorot_orthogonal(self.lin2.weight, scale=2.0) self.lin2.bias.data.fill_(0) def forward(self, input_0): primals_1 = self.lin1.weight primals_3 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
shnhrtkyk/pytorch_geometric
ResidualLayer
false
10811
[ "MIT" ]
0
b971fd2ebba10736e6398d6305757be2d81ca681
https://github.com/shnhrtkyk/pytorch_geometric/tree/b971fd2ebba10736e6398d6305757be2d81ca681
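A quick shape check for ResidualLayerNew (CUDA assumed, class above in scope); the two pointwise kernels fuse the swish activation x * sigmoid(x) around each matmul, so the output is x + swish(lin2(swish(lin1(x)))).
import torch

layer = ResidualLayerNew(hidden_channels=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = layer(x)
assert y.shape == x.shape  # the residual connection preserves the input shape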
RankingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/sb/csb3yn2vfnide3womhvmhsqxjqxl375rkxhqvgtsa3dhf6oka6pp.py # Topologically Sorted Source Nodes: [clamp, ones, binary_cross_entropy, pred_loss_1], Original ATen: [aten.clamp, aten.sign, aten.binary_cross_entropy, aten.sigmoid] # Source node to ATen node mapping: # binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub_2, sub_3 # clamp => clamp_min # ones => sign # pred_loss_1 => sigmoid # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%slice_1, 0), kwargs = {}) # %sign : [num_users=2] = call_function[target=torch.ops.aten.sign.default](args = (%clamp_min,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sign, 1), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_2,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sigmoid,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sign, %maximum_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # 
%mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {}) triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 r1 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r2), None) tmp1 = tl.load(in_ptr0 + (192 + r0 + ((-64)*r1)), None) tmp14 = tl.load(in_ptr1 + (r2), None) tmp15 = tl.load(in_ptr1 + (192 + r0 + ((-64)*r1)), None) tmp2 = tmp0 - tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tl.full([1, 1], 0, tl.int32) tmp6 = tmp5 < tmp4 tmp7 = tmp6.to(tl.int8) tmp8 = tmp4 < tmp5 tmp9 = tmp8.to(tl.int8) tmp10 = tmp7 - tmp9 tmp11 = tmp10.to(tmp4.dtype) tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp16 = tmp14 - tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = -tmp17 tmp19 = libdevice.log1p(tmp18) tmp20 = -100.0 tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = tmp13 * tmp21 tmp23 = tl_math.log(tmp17) tmp24 = triton_helpers.maximum(tmp23, tmp20) tmp25 = tmp11 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = 128.0 tmp31 = tmp29 / tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp31, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # 
reuse # Topologically Sorted Source Nodes: [clamp, ones, binary_cross_entropy, pred_loss_1], Original ATen: [aten.clamp, aten.sign, aten.binary_cross_entropy, aten.sigmoid] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0.run(buf1, arg0_1, arg1_1, 1, 128, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 r1 = rindex // 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr0 + (192 + r0 + -64 * r1), None) tmp14 = tl.load(in_ptr1 + r2, None) tmp15 = tl.load(in_ptr1 + (192 + r0 + -64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tl.full([1, 1], 0, tl.int32) tmp6 = tmp5 < tmp4 tmp7 = tmp6.to(tl.int8) tmp8 = tmp4 < tmp5 tmp9 = tmp8.to(tl.int8) tmp10 = tmp7 - tmp9 tmp11 = tmp10.to(tmp4.dtype) tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp16 = tmp14 - tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = -tmp17 tmp19 = libdevice.log1p(tmp18) tmp20 = -100.0 tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = tmp13 * tmp21 tmp23 = tl_math.log(tmp17) tmp24 = triton_helpers.maximum(tmp23, tmp20) tmp25 = tmp11 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = 128.0 tmp31 = tmp29 / tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_clamp_sigmoid_sign_0[grid(1)]( buf1, arg0_1, arg1_1, 1, 128, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class RankingLossNew(nn.Module): def __init__(self): super().__init__() self.bce = nn.BCELoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
KMU-AELAB/Active_Learning
RankingLoss
false
2444
[ "MIT" ]
0
bc569c16b5f12b58989a8f3db59b7eb4e35cce1b
https://github.com/KMU-AELAB/Active_Learning/tree/bc569c16b5f12b58989a8f3db59b7eb4e35cce1b
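A hedged sketch for RankingLossNew (CUDA assumed): the single persistent-reduction kernel builds 0/1 targets from the sign of clamped score differences between mirrored batch entries, applies a sigmoid, and mean-reduces a binary cross-entropy, returning a scalar.
import torch

loss_fn = RankingLossNew()
pred = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
loss = loss_fn(pred, target)
assert loss.dim() == 0  # the fused kernel folds the BCE mean into one launch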
MultiHeadAttn
import torch import torch.cuda from torch import nn import torch.nn.functional as F import torch.distributed import torch.optim class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.scale = 1 / d_head ** 0.5 self.pre_lnorm = pre_lnorm self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) def forward(self, inp, attn_mask=None): return self._forward(inp, attn_mask) def _forward(self, inp, attn_mask=None): residual = inp if self.pre_lnorm: inp = self.layer_norm(inp) n_head, d_head = self.n_head, self.d_head head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2) head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head) head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head) head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head) q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) attn_score = torch.bmm(q, k.transpose(1, 2)) attn_score.mul_(self.scale) if attn_mask is not None: attn_mask = attn_mask.unsqueeze(1) attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1) attn_score.masked_fill_(attn_mask, -float('inf')) attn_prob = F.softmax(attn_score, dim=2) attn_prob = self.dropatt(attn_prob) attn_vec = torch.bmm(attn_prob, v) attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head) attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size( 0), inp.size(1), n_head * d_head) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: output = residual + attn_out else: output = self.layer_norm(residual + attn_out) return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'd_model': 4, 'd_head': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.cuda from torch import nn import torch.nn.functional as F import torch.distributed import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (32 + x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask) tmp1 = tl.load(in_ptr1 + (32 + x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask) tmp1 = tl.load(in_ptr1 + (16 + x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, 
eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (48, 4), (4, 1)) assert_size_stride(primals_3, (48,), (1,)) assert_size_stride(primals_4, (4, 16), (16, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 48), (48, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 48), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), 
(64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf0, primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](buf0, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](buf0, primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_4[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf9, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_1, buf9, buf10, buf11, primals_5, primals_6, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf11 del primals_6 return buf12, primals_1, primals_5, buf6, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0) class MultiHeadAttnNew(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False): super(MultiHeadAttnNew, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.scale = 1 / d_head ** 0.5 self.pre_lnorm = pre_lnorm self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) def _forward(self, inp, attn_mask=None): residual = inp if self.pre_lnorm: inp = self.layer_norm(inp) n_head, d_head = self.n_head, self.d_head head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2) head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head) head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head) head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head) q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) attn_score = torch.bmm(q, k.transpose(1, 2)) attn_score.mul_(self.scale) if attn_mask is not None: attn_mask = attn_mask.unsqueeze(1) attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1) 
attn_score.masked_fill_(attn_mask, -float('inf')) attn_prob = F.softmax(attn_score, dim=2) attn_prob = self.dropatt(attn_prob) attn_vec = torch.bmm(attn_prob, v) attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head) attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size( 0), inp.size(1), n_head * d_head) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: output = residual + attn_out else: output = self.layer_norm(residual + attn_out) return output def forward(self, input_0): primals_2 = self.qkv_net.weight primals_3 = self.qkv_net.bias primals_4 = self.o_net.weight primals_5 = self.layer_norm.weight primals_6 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
admariner/NeMo
MultiHeadAttn
false
1394
[ "Apache-2.0" ]
0
e542d7f9063a40afa4119a3b94de4c2c636a37bb
https://github.com/admariner/NeMo/tree/e542d7f9063a40afa4119a3b94de4c2c636a37bb
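Since the `MultiHeadAttnNew` wrapper above retains the eager attention path as `_forward`, the compiled graph can be sanity-checked against it. A minimal sketch, assuming the record's code (with its imports) is in scope and a CUDA device is available; `call()` asserts a (4, 4, 4) input, and n_head * d_head = 16 matches the traced qkv weight shape (48, 4). With both dropouts at 0, the two paths should agree up to float tolerance:

import torch

m = MultiHeadAttnNew(n_head=4, d_model=4, d_head=4, dropout=0.0, dropatt=0.0).cuda().eval()
x = torch.rand(4, 4, 4, device='cuda')  # call() asserts this exact shape

with torch.no_grad():
    fused = m(x)           # Inductor/Triton path via call()
    eager = m._forward(x)  # retained eager reference path
torch.testing.assert_close(fused, eager, rtol=1e-4, atol=1e-4)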
L1Loss
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()
    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])
    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None,
            **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def l1_loss(pred, target):
    assert pred.size() == target.size() and target.numel() > 0
    loss = torch.abs(pred - target)
    return loss


class L1Loss(nn.Module):

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(L1Loss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
            reduction_override=None):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.
            reduction)
        loss_bbox = self.loss_weight * l1_loss(pred, target, weight,
            reduction=reduction, avg_factor=avg_factor)
        return loss_bbox


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = 1.0
    tmp10 = tmp8 * tmp9
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()
    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])
    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None,
            **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def l1_loss(pred, target):
    assert pred.size() == target.size() and target.numel() > 0
    loss = torch.abs(pred - target)
    return loss


class L1LossNew(nn.Module):

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(L1LossNew, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
CK-er/mmdet
L1Loss
false
2063
[ "Apache-2.0" ]
0
9bea4068efbcf7bf739dbe41917a68d525c29868
https://github.com/CK-er/mmdet/tree/9bea4068efbcf7bf739dbe41917a68d525c29868
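As a usage sketch, the docstring examples above can be reproduced directly (assuming `l1_loss` and `L1Loss` from this record are in scope; these run on CPU, and only the compiled `L1LossNew` path requires CUDA):

import torch

pred = torch.tensor([0.0, 2.0, 3.0])
target = torch.tensor([1.0, 1.0, 1.0])
weight = torch.tensor([1.0, 0.0, 1.0])

print(l1_loss(pred, target))                        # tensor(1.3333): mean of |pred - target|
print(l1_loss(pred, target, weight))                # tensor(1.): mean of the weighted loss
print(l1_loss(pred, target, reduction='none'))      # tensor([1., 1., 2.]): no reduction
print(l1_loss(pred, target, weight, avg_factor=2))  # tensor(1.5000): weighted sum / avg_factor
print(L1Loss()(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)))  # scalar: loss_weight * mean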
PositionwiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/y7/cy75pv62flzgueapzlnlplljec2lmbrjxeuhmgi2gb5pjyl6sna4.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ur/curhmk5y5q3p75evjihs7kgjnfdmg2wtto34yur6beusgb5lyduh.py # Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv1d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/r2/cr2h3252vsveia733y3hc44mzgu7rann2uwjgvo4xn54fam36ec5.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # output_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/6a/c6ay25nxozjje6lo7ldghziivuwdddyjybryyhc4wnnw3ugkoobb.py # Topologically Sorted Source Nodes: [add, output_4], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # output_4 => clone_1, var_mean # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {}) # %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_1, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qh/cqhud5pr2n7orqbv5pdlb2l35z7vhlv6okumz3vydoqn6u4n5ipk.py # Topologically Sorted Source Nodes: [add, output_4], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # output_4 => add_1, add_2, clone_1, mul, mul_1, rsqrt, sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {}) # %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {}) triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (y3), ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + (4*y3)), tmp13, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu] 
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf4, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add, output_4], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_3.run(buf4, primals_1, buf5, buf6, 16, grid=grid(16), stream=stream0) buf7 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, output_4], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(buf4, primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4, grid=grid(16, 4), stream=stream0) del buf5 del buf6 del primals_7 return (buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(16)](buf4, primals_1, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf4, primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del buf5 del buf6 del primals_7 return buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4 class PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) self.w_2 = nn.Conv1d(d_hid, d_in, 1) self.layer_norm = nn.LayerNorm(d_in) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
CSLT-THU/Vivi_3.0
PositionwiseFeedForward
false
17029
[ "Apache-2.0" ]
3
86996d99d662cd33100755501a971c41ce30ca70
https://github.com/CSLT-THU/Vivi_3.0/tree/86996d99d662cd33100755501a971c41ce30ca70
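The fused graph above computes conv1d -> ReLU -> conv1d on the transposed input, adds the residual, and applies LayerNorm (dropout does not appear in the traced graph). A minimal sketch of that math in eager PyTorch, assuming `PositionwiseFeedForwardNew` from this record and a CUDA device:

import torch

m = PositionwiseFeedForwardNew(d_in=4, d_hid=4).cuda().eval()
x = torch.rand(4, 4, 4, device='cuda')  # call() asserts a (4, 4, 4) input

def eager_ffn(x):
    # nn.Conv1d expects (batch, channels, length), hence the transposes
    y = m.w_2(torch.relu(m.w_1(x.transpose(1, 2)))).transpose(1, 2)
    return m.layer_norm(y + x)

torch.testing.assert_close(m(x), eager_ffn(x), rtol=1e-4, atol=1e-4)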
HSwish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/jj/cjjcpa4jfom3kmx4ufnxtda3bmq466cpemkegyhzep2ymmlsg35l.py # Topologically Sorted Source Nodes: [add, relu6, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div] # Source node to ATen node mapping: # add => add # mul => mul # relu6 => clamp_max, clamp_min # truediv => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %clamp_max), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6.0), kwargs = {}) triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = 0.16666666666666666 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, relu6, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional
import torch.optim
import torch.nn.parallel
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp0 * tmp6
    tmp8 = 0.16666666666666666
    tmp9 = tmp7 * tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class HSwishNew(nn.Module):
    """
    Applies the Hard-Swish function element-wise.

    `"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_

    Examples:
        >>> m = HSwishNew()
        >>> x = torch.randn(2)
        >>> output = m(x)
    """

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PhelaPoscam/SRGAN-PyTorch
HSwish
false
957
[ "Apache-2.0" ]
0
c1c68707dbddd1130b2ea71023df748080bcbd52
https://github.com/PhelaPoscam/SRGAN-PyTorch/tree/c1c68707dbddd1130b2ea71023df748080bcbd52
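The single fused kernel above evaluates hard-swish, x * clamp(x + 3, 0, 6) / 6, with the division by 6 folded into a multiply by 1/6. A quick check against the eager formula, assuming `HSwishNew` from this record, a CUDA device, and the traced (4, 4, 4, 4) input shape:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = x * F.relu6(x + 3.0) / 6.0  # eager hard-swish
torch.testing.assert_close(HSwishNew()(x), ref)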
Policy
import torch import torch.nn as nn import torch.nn.functional as F class Policy(nn.Module): def __init__(self): super(Policy, self).__init__() self.affine1 = nn.Linear(4, 128) self.affine2 = nn.Linear(128, 2) self.saved_log_probs = [] self.rewards = [] def forward(self, x): x = F.relu(self.affine1(x)) action_scores = self.affine2(x) return F.softmax(action_scores, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 128), (128, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, 
buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) triton_poi_fused__softmax_1[grid(128)](buf2, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(128)](buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf4, primals_4, buf5 class PolicyNew(nn.Module): def __init__(self): super(PolicyNew, self).__init__() self.affine1 = nn.Linear(4, 128) self.affine2 = nn.Linear(128, 2) self.saved_log_probs = [] self.rewards = [] def forward(self, input_0): primals_1 = self.affine1.weight primals_2 = self.affine1.bias primals_4 = self.affine2.weight primals_5 = self.affine2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Chandan-h-509/ignite
Policy
false
8976
[ "BSD-3-Clause" ]
0
f8c39828cb1dac49b6ef358cdf77865bf2430106
https://github.com/Chandan-h-509/ignite/tree/f8c39828cb1dac49b6ef358cdf77865bf2430106
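Note that the compiled `call` is specialized to the (4, 4, 4, 4) shape the graph was traced with, so `PolicyNew` is not a drop-in replacement for the usual (N, 4) CartPole state batches. A small usage sketch, assuming `PolicyNew` from this record and a CUDA device:

import torch

policy = PolicyNew().cuda()
state = torch.rand(4, 4, 4, 4, device='cuda')  # call() asserts this exact shape
probs = policy(state)                          # softmax over dim=1, as in the eager forward
print(probs.shape)                             # torch.Size([4, 4, 4, 2])
print(probs.sum(dim=1))                        # all ones (up to rounding): dim=1 is normalized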
MPJPELoss
import torch import torch.nn as nn class MPJPELoss(nn.Module): """MPJPE (Mean Per Joint Position Error) loss. Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: batch_size: N num_keypoints: K dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N, K, D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = torch.mean(torch.norm((output - target) * target_weight, dim=-1)) else: loss = torch.mean(torch.norm(output - target, dim=-1)) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = 64.0 tmp24 = tmp22 / tmp23 tmp25 = 1.0 tmp26 = tmp24 * tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MPJPELossNew(nn.Module): """MPJPE (Mean Per Joint Position Error) loss. Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ZephyrII/mmpose_charger
MPJPELoss
false
12029
[ "Apache-2.0" ]
0
ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
https://github.com/ZephyrII/mmpose_charger/tree/ca5f7ab439ae40c4ceab2c6fd1d58112dc0ea7cd
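The fused kernel above takes the L2 norm of output - target over the last axis (the sqrt of four squared differences per joint) and averages the 64 remaining elements. An equivalence sketch, assuming `MPJPELossNew` from this record and a CUDA device:

import torch

output = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
ref = torch.mean(torch.norm(output - target, dim=-1))  # eager MPJPELoss with loss_weight=1.0
torch.testing.assert_close(MPJPELossNew()(output, target), ref)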
BertPredictionHeadTransform
from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn


def gelu(x):
    """The GELU (Gaussian Error Linear Unit) activation function.

    ReLU has a sharp kink (a discontinuous derivative) at 0; GELU is a
    smoothed version of ReLU that is continuous there.
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


class BertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """A LayerNormalization layer.

        The variable names follow the pretrained model so that its weights
        can be loaded as-is; compared with the original GitHub implementation
        they are renamed weight -> gamma and bias -> beta.
        """
        super(BertLayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(hidden_size))
        self.beta = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.gamma * x + self.beta


class BertPredictionHeadTransform(nn.Module):
    """Module used by MaskedWordPredictions to transform the features coming
    out of BERT (input and output sizes are the same)."""

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = gelu
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        """hidden_states is sequence_output: [minibatch, seq_len, hidden_size]"""
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4)}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865475
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tmp10 = tmp9 * tmp1
    tmp11 = tmp9 * tmp3
    tmp12 = libdevice.erf(tmp11)
    tmp13 = tmp12 + tmp6
    tmp14 = tmp10 * tmp13
    tmp15 = tmp8 + tmp14
    tmp17 = tmp16 * tmp1
    tmp18 = tmp16 * tmp3
    tmp19 = libdevice.erf(tmp18)
    tmp20 = tmp19 + tmp6
    tmp21 = tmp17 * tmp20
    tmp22 = tmp15 + tmp21
    tmp24 = tmp23 * tmp1
    tmp25 = tmp23 * tmp3
    tmp26 = libdevice.erf(tmp25)
    tmp27 = tmp26 + tmp6
    tmp28 = tmp24 * tmp27
    tmp29 = tmp22 + tmp28
    tmp30 = 4.0
    tmp31 = tmp29 / tmp30
    tmp32 = tmp8 - tmp31
    tmp33 = tmp32 * tmp32
    tmp34 = tmp14 - tmp31
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp21 - tmp31
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp28 - tmp31
    tmp41 = tmp40 * tmp40
    tmp42 = tmp39 + tmp41
    tmp43 = tmp42 / tmp30
    tl.store(out_ptr0 + x0, tmp31, xmask)
    tl.store(out_ptr1 + x0, tmp43, xmask)


@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = 0.7071067811865475
    tmp5 = tmp1 * tmp4
    tmp6 = libdevice.erf(tmp5)
    tmp7 = 1.0
    tmp8 = tmp6 + tmp7
    tmp9 = tmp3 * tmp8
    tmp11 = tmp9 - tmp10
    tmp13 = 1e-12
    tmp14 = tmp12 + tmp13
    tmp15 = libdevice.sqrt(tmp14)
    tmp16 = tmp11 / tmp15
    tmp17 = tmp0 * tmp16
    tmp19 = tmp17 + tmp18
    tl.store(out_ptr0 + x2, tmp19, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0,
            buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4,
            buf0, buf1, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf1
        del buf2
        del primals_5
    return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf0


def gelu(x):
    """The Gaussian Error Linear Unit (GELU) activation function.

    ReLU has a sharp kink at 0; GELU smooths that point out so the
    activation is differentiable everywhere.
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


class BertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """A LayerNormalization layer.

        Variable names are changed to match the pretrained model so that its
        weights can be loaded as-is; relative to the original GitHub
        implementation, weight -> gamma and bias -> beta.
        """
        super(BertLayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(hidden_size))
        self.beta = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.gamma * x + self.beta


class BertPredictionHeadTransformNew(nn.Module):
    """Module used in MaskedWordPredictions to transform features coming
    from BERT (input and output sizes are identical)."""

    def __init__(self, config):
        super(BertPredictionHeadTransformNew, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = gelu
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, input_0):
        primals_1 = self.dense.weight
        primals_2 = self.dense.bias
        primals_4 = self.LayerNorm.gamma
        primals_5 = self.LayerNorm.beta
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
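# A usage sketch (assumptions: both class definitions above are importable in
# one scope and a CUDA device is available). With shared parameters, the
# compiled transform should agree with the eager one up to float tolerance.
import torch
from _paritybench_helpers import _mock_config

config = _mock_config(hidden_size=4)
eager = BertPredictionHeadTransform(config).cuda()
fused = BertPredictionHeadTransformNew(config).cuda()
fused.load_state_dict(eager.state_dict())  # parameter names match 1:1

x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)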
Cyndi-Tokyotech/Fin_Text_Analysis_ML
BertPredictionHeadTransform
false
11,120
[ "MIT" ]
0
7f9b6c1ea78f8e6f32c003b2de32809722df88d4
https://github.com/Cyndi-Tokyotech/Fin_Text_Analysis_ML/tree/7f9b6c1ea78f8e6f32c003b2de32809722df88d4
Expand
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_6/inductor_cache/hd/chdczdfwoeph362wur27ffhsorgcehnjtoyqyws4eeeneunwakso.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   x_1 => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128, 2], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 128
    xnumel = 2
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x4 = xindex
    y0 = yindex % 4
    y1 = (yindex // 4) % 2
    y2 = (yindex // 8) % 4
    y3 = (yindex // 32)
    y5 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (4*y2) + (16*x4) + (32*y1) + (64*y3)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x4 + (2*y5)), tmp0, xmask & ymask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 2, 4, 2), (64, 64, 16, 8, 2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clone_0.run(arg0_1, buf0, 128, 2, grid=grid(128, 2), stream=stream0)
        del arg0_1
    return (reinterpret_tensor(buf0, (4, 1, 8, 8), (64, 64, 8, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    ynumel = 128
    xnumel = 2
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x4 = xindex
    y0 = yindex % 4
    y1 = yindex // 4 % 2
    y2 = yindex // 8 % 4
    y3 = yindex // 32
    y5 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * y2 + 16 * x4 + 32 * y1 + 64 * y3),
        xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x4 + 2 * y5), tmp0, xmask & ymask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 2, 4, 2), (64, 64, 16, 8, 2, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(128, 2)](arg0_1, buf0, 128, 2,
            XBLOCK=2, YBLOCK=64, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 1, 8, 8), (64, 64, 8, 1), 0),


class ExpandNew(nn.Module):

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
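# An equivalence sketch (assumes a CUDA device). The clone kernel folds
# channels into spatial dimensions, (4, 4, 4, 4) -> (4, 1, 8, 8); the eager
# view/permute sequence below is reconstructed from the kernel's indexing,
# not taken from the source repo.
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
out = ExpandNew(gain=2)(x)

b, c, h, w = x.shape
s = 2  # gain
ref = x.view(b, s, s, c // s ** 2, h, w).permute(0, 3, 4, 1, 5, 2).reshape(
    b, c // s ** 2, h * s, w * s)
assert out.shape == (4, 1, 8, 8)
assert torch.equal(out, ref)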
ChaokunChang/SVAS
Expand
false
248
[ "Apache-2.0" ]
0
61af6eb39269edff8ea5147311628b3200c3a3d2
https://github.com/ChaokunChang/SVAS/tree/61af6eb39269edff8ea5147311628b3200c3a3d2
TranLayer
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/ez/cezmv74yrhrunjwqrletcmzzbnanma4ylsle3v7w345t7kxp622s.py
# Topologically Sorted Source Nodes: [aggNodes], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   aggNodes => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64, 4], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = (yindex // 4)
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/77/c77fhg42q7klen5ufr7gfavdlq2lmbawyinwhesqnyoe7oze7ag6.py
# Topologically Sorted Source Nodes: [aggNodes, relu], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   aggNodes => add
#   relu => relu
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + (x0), tmp5, xmask)
    tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/y2/cy2wuqwbyhvfsexr4wzystx4m55dkmgikh4mwi6rivcn3b42elvm.py
# Topologically Sorted Source Nodes: [aggEmbed_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   aggEmbed_1 => relu_1
# Graph fragment:
#   %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + (x0), tmp5, xmask)
    tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4), (4, 1))
    assert_size_stride(primals_3, (1, ), (1, ))
    assert_size_stride(primals_4, (1, 4), (4, 1))
    assert_size_stride(primals_5, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [aggNodes], Original ATen: [aten.clone]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [aggNodes], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1  # reuse
        buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
        # Topologically Sorted Source Nodes: [aggNodes, relu], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
        triton_poi_fused_add_relu_threshold_backward_1.run(buf2, primals_3, buf6, 64, grid=grid(64), stream=stream0)
        del primals_3
        buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf3)
        buf4 = buf3; del buf3  # reuse
        buf5 = empty_strided_cuda((16, 1), (1, 1), torch.bool)
        # Topologically Sorted Source Nodes: [aggEmbed_1], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf5, 16, grid=grid(16), stream=stream0)
        del primals_5
    return (buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf5, primals_4, buf6, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + x0, tmp5, xmask)
    tl.store(out_ptr0 + x0, tmp7, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + x0, tmp5, xmask)
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4), (4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    assert_size_stride(primals_4, (1, 4), (4, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
            XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf1
        buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf2,
            primals_3, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf3)
        buf4 = buf3
        del buf3
        buf5 = empty_strided_cuda((16, 1), (1, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_2[grid(16)](buf4,
            primals_5, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
    return buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf5, primals_4, buf6


class TranLayerNew(nn.Module):

    def __init__(self, embed_dim, num_nodes):
        super(TranLayerNew, self).__init__()
        self.embed_dim = embed_dim
        self.num_nodes = num_nodes
        self.linear_nodes = nn.Linear(in_features=self.num_nodes,
            out_features=1, bias=True)
        self.linear_embed = nn.Linear(in_features=self.embed_dim,
            out_features=1, bias=True)
        self.reset_parameters()

    def reset_parameters(self):
        """ Reinitialize learnable parameters. """
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_normal_(self.linear_nodes.weight, gain=gain)
        nn.init.xavier_normal_(self.linear_embed.weight, gain=gain)

    def forward(self, input_0):
        primals_2 = self.linear_nodes.weight
        primals_3 = self.linear_nodes.bias
        primals_4 = self.linear_embed.weight
        primals_5 = self.linear_embed.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
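# A usage sketch (assumes a CUDA device; shapes follow the stride guards in
# call above). The layer transposes the last two dims, reduces the node
# dimension and then the embedding dimension, each via a 1-output Linear
# followed by ReLU, yielding one scalar per flattened leading (4, 4) slice.
import torch

layer = TranLayerNew(embed_dim=4, num_nodes=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = layer(x)
assert out.shape == (16, 1)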
WingsUpete/EEG2Age
TranLayer
false
5,985
[ "MIT" ]
1
8d7b9049fe4e47c701659bbbf2843600fa7c8d8d
https://github.com/WingsUpete/EEG2Age/tree/8d7b9049fe4e47c701659bbbf2843600fa7c8d8d