|
module {
  tt.func public @triton__0d1d2d3d4d5d6d7de8(%arg0: !tt.ptr<f32, 1> {tt.divisibility = 16 : i32}, %arg1: !tt.ptr<i64, 1> {tt.divisibility = 16 : i32}, %arg2: !tt.ptr<f32, 1> {tt.divisibility = 16 : i32}, %arg3: !tt.ptr<f32, 1> {tt.divisibility = 16 : i32}, %arg4: !tt.ptr<bf16, 1> {tt.divisibility = 16 : i32}, %arg5: !tt.ptr<bf16, 1> {tt.divisibility = 16 : i32}, %arg6: !tt.ptr<bf16, 1> {tt.divisibility = 16 : i32}, %arg7: i64 {tt.divisibility = 16 : i32, tt.max_divisibility = 16 : i32}, %arg8: i64) attributes {noinline = false} {
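    // NOTE (added commentary, inferred from the IR itself rather than from any
    // accompanying docs): this has the shape of an Inductor-generated fused
    // log-softmax/cross-entropy backward tile. Each program owns 8 rows of a
    // [num_rows x 50257] tensor and sweeps the 50257-wide vocab dimension in
    // 512-column tiles. Pass 1 accumulates a per-row sum of %arg0 * scale;
    // pass 2 stores %arg4 + (%arg0 * scale - exp(%arg5) * row_sum) to %arg6 in
    // bf16. Rows whose label (%arg1) equals -1 get scale 0, consistent with an
    // ignore_index of -1. %arg7 and %arg8 are unused below.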
|
    %cst = arith.constant dense<0.000000e+00> : tensor<8x512xbf16>
    %cst_0 = arith.constant dense<0.000000e+00> : tensor<8x1xf32>
    %c50257_i32 = arith.constant 50257 : i32
    %c512_i32 = arith.constant 512 : i32
    %c0_i32 = arith.constant 0 : i32
    %cst_1 = arith.constant dense<50257> : tensor<8x1xi64>
    %cst_2 = arith.constant dense<50257> : tensor<1x512xi64>
    %c8_i64 = arith.constant 8 : i64
    %cst_3 = arith.constant dense<-1> : tensor<8x1xi64>
    %cst_4 = arith.constant dense<0.000000e+00> : tensor<8x512xf32>
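
    // Tile coordinates: rows are pid * 8 + [0, 8) as an 8x1 column, columns are
    // [0, 512) as a 1x512 row; both are widened to i64 for pointer arithmetic.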
|
    %0 = tt.get_program_id x : i32
    %1 = arith.extsi %0 : i32 to i64
    %2 = arith.muli %1, %c8_i64 : i64
    %3 = tt.make_range {end = 8 : i32, start = 0 : i32} : tensor<8xi32>
    %4 = tt.expand_dims %3 {axis = 1 : i32} : (tensor<8xi32>) -> tensor<8x1xi32>
    %5 = arith.extsi %4 : tensor<8x1xi32> to tensor<8x1xi64>
    %6 = tt.splat %2 : (i64) -> tensor<8x1xi64>
    %7 = arith.addi %6, %5 : tensor<8x1xi64>
    %8 = tt.make_range {end = 512 : i32, start = 0 : i32} : tensor<512xi32>
    %9 = tt.expand_dims %8 {axis = 0 : i32} : (tensor<512xi32>) -> tensor<1x512xi32>
    %10 = arith.extsi %9 : tensor<1x512xi32> to tensor<1x512xi64>
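
    // Per-row label from %arg1, plus two scalars from %arg2/%arg3 whose ratio
    // becomes the gradient scale (plausibly grad_output over a denominator such
    // as the count of non-ignored elements).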
|
    %11 = tt.splat %arg1 : (!tt.ptr<i64, 1>) -> tensor<8x1x!tt.ptr<i64, 1>>
    %12 = tt.addptr %11, %7 : tensor<8x1x!tt.ptr<i64, 1>>, tensor<8x1xi64>
    %13 = tt.load %12 {cache = 1 : i32, evict = 3 : i32, isVolatile = false} : tensor<8x1xi64>
    %14 = tt.addptr %arg2, %c0_i32 : !tt.ptr<f32, 1>, i32
    %15 = tt.load %14 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : f32
    %16 = tt.addptr %arg3, %c0_i32 : !tt.ptr<f32, 1>, i32
    %17 = tt.load %16 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : f32
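
    // Row base offsets (row * 50257, i.e. row-major stride over the vocab) and
    // the masked per-row scale: scale = %15 / %17 where label != -1, else 0.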
|
    %18 = arith.muli %7, %cst_1 : tensor<8x1xi64>
    %19 = tt.broadcast %18 : (tensor<8x1xi64>) -> tensor<8x512xi64>
    %20 = tt.splat %arg0 : (!tt.ptr<f32, 1>) -> tensor<8x512x!tt.ptr<f32, 1>>
    %21 = arith.cmpi ne, %13, %cst_3 : tensor<8x1xi64>
    %22 = arith.divf %15, %17 : f32
    %23 = tt.splat %22 : (f32) -> tensor<8x1xf32>
    %24 = arith.select %21, %23, %cst_0 : tensor<8x1xi1>, tensor<8x1xf32>
    %25 = tt.broadcast %24 : (tensor<8x1xf32>) -> tensor<8x512xf32>
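
    // Pass 1: sweep the vocab dimension in 512-wide tiles and accumulate
    // sum_j(%arg0[row, j] * scale) per row; %44/%48 mask off the tail tile
    // past column 50257 in both the load and the accumulation.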
|
    %26 = scf.for %arg9 = %c0_i32 to %c50257_i32 step %c512_i32 iter_args(%arg10 = %cst_4) -> (tensor<8x512xf32>) : i32 {
      %41 = arith.extsi %arg9 : i32 to i64
      %42 = tt.splat %41 : (i64) -> tensor<1x512xi64>
      %43 = arith.addi %42, %10 : tensor<1x512xi64>
      %44 = arith.cmpi slt, %43, %cst_2 : tensor<1x512xi64>
      %45 = tt.broadcast %43 : (tensor<1x512xi64>) -> tensor<8x512xi64>
      %46 = arith.addi %45, %19 : tensor<8x512xi64>
      %47 = tt.addptr %20, %46 : tensor<8x512x!tt.ptr<f32, 1>>, tensor<8x512xi64>
      %48 = tt.broadcast %44 : (tensor<1x512xi1>) -> tensor<8x512xi1>
      %49 = tt.load %47, %48, %cst_4 {cache = 1 : i32, evict = 3 : i32, isVolatile = false} : tensor<8x512xf32>
      %50 = arith.mulf %49, %25 : tensor<8x512xf32>
      %51 = arith.addf %arg10, %50 : tensor<8x512xf32>
      %52 = arith.select %48, %51, %arg10 : tensor<8x512xi1>, tensor<8x512xf32>
      scf.yield %52 : tensor<8x512xf32>
    }
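
    // Reduce the 8x512 partial accumulator along axis 1 to one f32 sum per
    // row, then reshape to 8x1 so it broadcasts against the tiles in pass 2.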
|
    %27 = "tt.reduce"(%26) <{axis = 1 : i32}> ({
    ^bb0(%arg9: f32, %arg10: f32):
      %41 = arith.addf %arg9, %arg10 : f32
      tt.reduce.return %41 : f32
    }) : (tensor<8x512xf32>) -> tensor<8xf32>
    %28 = tt.expand_dims %27 {axis = 1 : i32} : (tensor<8xf32>) -> tensor<8x1xf32>
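
    // Pass 2 setup. Note that %29-%30 and %34-%38 recompute exactly the same
    // row offsets and masked scale as %18-%19 and %21-%25; the generator
    // emitted them twice rather than reusing the earlier values.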
|
    %29 = arith.muli %7, %cst_1 : tensor<8x1xi64>
    %30 = tt.broadcast %29 : (tensor<8x1xi64>) -> tensor<8x512xi64>
    %31 = tt.splat %arg4 : (!tt.ptr<bf16, 1>) -> tensor<8x512x!tt.ptr<bf16, 1>>
    %32 = tt.splat %arg0 : (!tt.ptr<f32, 1>) -> tensor<8x512x!tt.ptr<f32, 1>>
    %33 = tt.splat %arg5 : (!tt.ptr<bf16, 1>) -> tensor<8x512x!tt.ptr<bf16, 1>>
    %34 = arith.cmpi ne, %13, %cst_3 : tensor<8x1xi64>
    %35 = arith.divf %15, %17 : f32
    %36 = tt.splat %35 : (f32) -> tensor<8x1xf32>
    %37 = arith.select %34, %36, %cst_0 : tensor<8x1xi1>, tensor<8x1xf32>
    %38 = tt.broadcast %37 : (tensor<8x1xf32>) -> tensor<8x512xf32>
    %39 = tt.broadcast %28 : (tensor<8x1xf32>) -> tensor<8x512xf32>
    %40 = tt.splat %arg6 : (!tt.ptr<bf16, 1>) -> tensor<8x512x!tt.ptr<bf16, 1>>
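
    // Pass 2: re-sweep the vocab dimension and store, per element,
    //   out = %arg4 + (%arg0 * scale - exp(%arg5) * row_sum)
    // truncated to bf16. Reading the IR as a backward pass, this is a
    // softmax-weighted correction applied on top of the incoming bf16 gradient.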
|
    scf.for %arg9 = %c0_i32 to %c50257_i32 step %c512_i32 : i32 {
      %41 = arith.extsi %arg9 : i32 to i64
      %42 = tt.splat %41 : (i64) -> tensor<1x512xi64>
      %43 = arith.addi %42, %10 : tensor<1x512xi64>
      %44 = arith.cmpi slt, %43, %cst_2 : tensor<1x512xi64>
      %45 = tt.broadcast %43 : (tensor<1x512xi64>) -> tensor<8x512xi64>
      %46 = arith.addi %45, %30 : tensor<8x512xi64>
      %47 = tt.addptr %31, %46 : tensor<8x512x!tt.ptr<bf16, 1>>, tensor<8x512xi64>
      %48 = tt.broadcast %44 : (tensor<1x512xi1>) -> tensor<8x512xi1>
      %49 = tt.load %47, %48, %cst {cache = 1 : i32, evict = 2 : i32, isVolatile = false} : tensor<8x512xbf16>
      %50 = arith.extf %49 : tensor<8x512xbf16> to tensor<8x512xf32>
      %51 = tt.addptr %32, %46 : tensor<8x512x!tt.ptr<f32, 1>>, tensor<8x512xi64>
      %52 = tt.load %51, %48, %cst_4 {cache = 1 : i32, evict = 2 : i32, isVolatile = false} : tensor<8x512xf32>
      %53 = tt.addptr %33, %46 : tensor<8x512x!tt.ptr<bf16, 1>>, tensor<8x512xi64>
      %54 = tt.load %53, %48, %cst {cache = 1 : i32, evict = 2 : i32, isVolatile = false} : tensor<8x512xbf16>
      %55 = arith.extf %54 : tensor<8x512xbf16> to tensor<8x512xf32>
      %56 = arith.mulf %52, %38 : tensor<8x512xf32>
      %57 = math.exp %55 : tensor<8x512xf32>
      %58 = arith.mulf %57, %39 : tensor<8x512xf32>
      %59 = arith.subf %56, %58 : tensor<8x512xf32>
      %60 = arith.addf %50, %59 : tensor<8x512xf32>
      %61 = tt.addptr %40, %46 : tensor<8x512x!tt.ptr<bf16, 1>>, tensor<8x512xi64>
      %62 = arith.truncf %60 : tensor<8x512xf32> to tensor<8x512xbf16>
      tt.store %61, %62, %48 {cache = 1 : i32, evict = 1 : i32} : tensor<8x512xbf16>
    }
    tt.return
  }
}