Dataset columns: language (string, 6 distinct values); original_string (string, 25 to 887k characters); text (string, 25 to 887k characters).
Python
def call_intrin(dtype, func_name, *args):
    """Build expression by calling an intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name : str
        The intrinsic function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    args = convert(args)
    return _make.Call(
        dtype, func_name, args, _Call.Intrinsic, None, 0)
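A minimal usage sketch, under assumptions: `x` is a hypothetical float32 variable and "exp" stands in for any intrinsic name that the target's translation rules know how to lower.

# Hypothetical usage: build a call to an overloadable intrinsic.
# The actual lowering of "exp" depends on the intrinsic rules
# registered for the chosen codegen target.
x = tvm.var("x", dtype="float32")
y = call_intrin("float32", "exp", x)   # Expr: exp(x), resolved per target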
Python
def nearbyint(x):
    """Round elements of the array to the nearest integer.

    This intrinsic uses llvm.nearbyint instead of llvm.round, which is faster
    but may produce results different from tvm.round. Notably, nearbyint rounds
    according to the current rounding mode, whereas tvm.round (llvm.round)
    ignores it. For differences between the two see:
    https://en.cppreference.com/w/cpp/numeric/math/round
    https://en.cppreference.com/w/cpp/numeric/math/nearbyint

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return _make.nearbyint(x)
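A hedged illustration of the difference using NumPy as a stand-in: np.rint follows the current rounding mode (round-half-to-even by default), which mirrors nearbyint, while rounding half away from zero mirrors llvm.round / tvm.round.

import numpy as np

# Round half to even (default FP rounding mode), like llvm.nearbyint:
print(np.rint(2.5), np.rint(3.5))                    # 2.0 4.0
# Round half away from zero, like llvm.round / tvm.round:
print(np.sign(2.5) * np.floor(np.abs(2.5) + 0.5))    # 3.0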
Python
def if_then_else(cond, t, f):
    """Conditional selection expression.

    Parameters
    ----------
    cond : Expr
        The condition.

    t : Expr
        The result expression if cond is true.

    f : Expr
        The result expression if cond is false.

    Returns
    -------
    result : Node
        The result of the conditional expression.

    Note
    ----
    Unlike Select, if_then_else will not execute the branch that does not
    satisfy the condition. You can use it to guard against out-of-bound access.
    Unlike Select, if_then_else cannot be vectorized if some lanes in the
    vector have different conditions.
    """
    return _make._OpIfThenElse(convert(cond), convert(t), convert(f))
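A small, hypothetical sketch of the bounds-guard pattern mentioned in the note; the placeholder tensor A of length n and the padded length m are assumptions for illustration only.

# Hypothetical bounds guard: pad a length-n buffer A out to m elements,
# reading A[i] only when i is in range and producing 0 otherwise.
B = tvm.compute((m,),
                lambda i: if_then_else(i < n, A[i], tvm.const(0, "float32")),
                name="B")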
Python
def register_intrin_rule(target, intrin, f=None, override=False):
    """Register an intrinsic function generation rule.

    Intrinsic generation rules are callback functions for the code generator
    to get device-specific calls. This function simply translates to

    :code:`register_func("tvm.intrin.rule.%s.%s" % (target, intrin), f, override)`

    TVM may already pre-register intrinsic rules in the backend. However, the
    user can use this function to change the intrinsic translation behavior or
    add new intrinsic rules at runtime.

    Parameters
    ----------
    target : str
        The name of codegen target.

    intrin : str
        The name of the intrinsic.

    f : function, optional
        The function to be registered.

    override : boolean, optional
        Whether to override the existing entry.

    Returns
    -------
    fregister : function
        Register function if f is not specified.

    Examples
    --------
    The following code registers exp expansion rule for opencl.

    .. code-block:: python

        register_intrin_rule("opencl", "exp", my_exp_rule, override=True)
    """
    return _register_func("tvm.intrin.rule.%s.%s" % (target, intrin), f, override)
Python
def _rule_float_suffix(op):
    """Intrinsic rule: Add float suffix if it is float32.

    This is an example intrinsic generation rule.

    Parameters
    ----------
    op : Expr
        The call expression of the original intrinsic.

    Returns
    -------
    ret : Expr
        The translated intrinsic rule.
        Return the same op if no translation is possible.

    See Also
    --------
    register_intrin_rule : The registration function for intrin rules.
    """
    if op.dtype == "float32":
        return call_pure_extern(op.dtype, "%sf" % op.name, *op.args)
    if op.dtype == "float64":
        return call_pure_extern(op.dtype, op.name, *op.args)
    return op
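A hedged registration sketch: the target name "c" is an assumption for a C-like backend, chosen only to illustrate the effect. With this rule in place, a float32 call to "exp" would be lowered to the extern function "expf", while float64 keeps the plain "exp" name.

# Hypothetical registration of the suffix rule for a C-like backend.
register_intrin_rule("c", "exp", _rule_float_suffix, override=True)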
Python
def _rule_float_direct(op):
    """Intrinsic rule: Directly call pure extern function for floats.

    This is an example intrinsic generation rule.

    Parameters
    ----------
    op : Expr
        The call expression of the original intrinsic.

    Returns
    -------
    ret : Expr
        The translated intrinsic rule.
        Return the same op if no translation is possible.

    See Also
    --------
    register_intrin_rule : The registration function for intrin rules.
    """
    if str(op.dtype).startswith("float"):
        return call_pure_extern(op.dtype, op.name, *op.args)
    return None
Python
def SparseSoftmaxCrossEntropyWithLogits(features, labels, is_grad=False, sens=1.0, target=utils.CCE):
    """sparse softmax cross entropy with logits"""
    if is_grad:
        return nn.SparseSoftmaxCrossEntropyWithLogitsAd(labels, features, reduction='mean',
                                                        grad_scale=sens, target=target)
    return nn.SparseSoftmaxCrossEntropyWithLogits(labels, features, reduction='mean', target=target)
Python
def Conv2DBackpropInput(out_backprop, input_sizes, filter, filter_shape, pad_list, stride=1,
                        dilation=1, target=utils.CCE):
    """back propagation of 2d convolution on input"""
    if len(pad_list) != 4:
        raise IndexError("Length of pad must be equal to 4")

    pad_ = pad_list
    data = [out_backprop, filter]
    fmap_shape = input_sizes
    stride_ = [stride, stride]
    dilation_ = [dilation, dilation]

    return nn.ConvBackpropInput(data, fmap_shape, filter_shape, pad_, stride_, dilation_,
                                target=target)
Python
def Conv2DBackpropFilter(out_backprop, input, input_shape, filter_sizes, pad_list, stride=1,
                         dilation=1, target=utils.CCE):
    """back propagation of 2d convolution on filter"""
    if len(pad_list) != 4:
        raise IndexError("Length of pad must be equal to 4")

    pad_ = pad_list
    data = [out_backprop, input]
    fmap_shape = input_shape
    filter_shape = filter_sizes
    stride_ = [stride, stride]
    dilation_ = [dilation, dilation]

    return nn.ConvBackpropFilter(data, fmap_shape, filter_shape, pad_, stride_, dilation_,
                                 target=target)
Python
def nms_tiling_strategy(tensor):
    """Custom tiling strategy for nms op"""
    strategy = list()
    tensor_shape = get_shape_from_tensor(tensor)
    for i, _ in enumerate(tensor_shape):
        if i == 0:
            strategy += ct_util.create_constraint_on_tensor(tensor=tensor,
                                                            values=1,
                                                            constraints=ct_util.TileConstraint.FACTOR,
                                                            tensor_pos=i)
        else:
            strategy += ct_util.create_constraint_on_tensor(tensor=tensor,
                                                            values="FULL",
                                                            constraints=ct_util.TileConstraint.MAX,
                                                            tensor_pos=i)
    return strategy
Python
def _div_ascend(data1, data2):
    """
    Calculates x/y, and returns an integer when inputs are all integers.

    When both arguments are integers, use integer division (also known as
    "floor division"). When the arguments are floating point numbers, use
    normal floating point division.

    Note:
        div supports broadcasting.

    Args:
        data1 (tvm.tensor.Tensor): Tensor of type float16, float32, int32, int8 and uint8.
        data2 (tvm.tensor.Tensor): Tensor of type float16, float32, int32, int8 and uint8.

    Returns:
        tvm.tensor.Tensor, has the same type as data1 and data2.
    """
    utils.ops_dtype_check([data1.dtype, data2.dtype], utils.DtypeForDavinci.ALL_TYPES)
    utils.elemwise_dtype_check(data1.dtype, data2.dtype)
    dtype = data1.dtype

    shape1 = [x.value for x in data1.shape]
    shape2 = [x.value for x in data2.shape]
    utils.check_shape(shape1)
    utils.check_shape(shape2)

    utils.auto_broadcast_check(shape1, shape2)
    n_shape1, n_shape2, out_shape = produce_shapes(shape1, shape2)
    if n_shape1 != out_shape:
        input1_cast = akg.topi.broadcast_to(data1, out_shape)
    else:
        input1_cast = data1
    if n_shape2 != out_shape:
        input2_cast = akg.topi.broadcast_to(data2, out_shape)
    else:
        input2_cast = data2

    if dtype in ("int32", "int8", "uint8"):
        input1p = Cast(input1_cast, "float16", utils.CCE)
        input2p = Cast(input2_cast, "float16", utils.CCE)
    else:
        input1p = input1_cast
        input2p = input2_cast

    if product_is_mini():
        input2p_rec = Reciprocal(input2p, target=utils.CCE)
        res = akg.topi.multiply(input1p, input2p_rec)
    else:
        res = akg.topi.divide(input1p, input2p)

    if dtype in ("int8", "uint8"):
        res = Floor(res, utils.CCE)
        res = Cast(res, "float16", utils.CCE)
    if dtype in ("int32", "int8", "uint8"):
        res = Cast(res, dtype, utils.CCE)

    return res
Python
def Tanh(in_data, target=utils.CCE):
    """
    Compute tanh function. This version is able to avoid exp(x) overflow when x is large.

    :math:`res = sign(in_data) * (1 - exp(-2*abs(in_data))) / (1 + exp(-2*abs(in_data)))`

    Args:
        in_data (tvm.tensor.Tensor): input tensor of type float16, float32.

    Returns:
        tvm.tensor.Tensor, has the same type and shape as in_data.

    Supported Platforms:
        'Ascend'
    """
    utils.check_shape(in_data.shape)
    dtype = in_data.dtype
    utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)
    ori_dtype = dtype
    in_data_compute = in_data
    if ori_dtype == "float32" and product_is_mini():
        in_data_compute = akg.tvm.compute(in_data.shape,
                                          lambda *indice: in_data(*indice).astype("float16"),
                                          name='type_cast')
        dtype = 'float16'

    in_data_abs = akg.lang.ascend.vabs(in_data_compute)
    exponent = akg.lang.ascend.vmuls(in_data_abs, akg.tvm.const(-2, dtype))
    exp_value = akg.lang.ascend.vexp(exponent)

    exp_value_add_one = akg.lang.ascend.vadds(exp_value, akg.tvm.const(1, dtype))
    one_sub_exp_value = akg.topi.subtract(akg.tvm.const(1, dtype), exp_value)
    exp_value_add_one_rec = RecPositive(exp_value_add_one, target)
    tanh_value_pos = akg.topi.multiply(one_sub_exp_value, exp_value_add_one_rec)
    output_shape = in_data_compute.shape
    sign = akg.tvm.compute(output_shape,
                           lambda *indice: akg.tvm.expr.Select(
                               in_data_compute(*indice) < akg.tvm.const(0, dtype),
                               akg.tvm.const(-1, dtype),
                               akg.tvm.const(1, dtype)))
    tanh_value = akg.topi.multiply(sign, tanh_value_pos)
    if ori_dtype == "float32" and product_is_mini():
        tanh_value = akg.tvm.compute(tanh_value.shape,
                                     lambda *indice: tanh_value(*indice).astype("float32"),
                                     name='res')

    return tanh_value
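A NumPy reference of the overflow-safe formula from the docstring, useful for a quick numerical check of the kernel; the test points and tolerance are assumptions.

import numpy as np

def tanh_reference(x):
    # sign(x) * (1 - exp(-2|x|)) / (1 + exp(-2|x|)); exp never overflows
    # because its argument is always <= 0.
    e = np.exp(-2.0 * np.abs(x))
    return np.sign(x) * (1.0 - e) / (1.0 + e)

x = np.linspace(-50, 50, 7).astype(np.float32)
assert np.allclose(tanh_reference(x), np.tanh(x), rtol=1e-6)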
Python
def upsampling(inputs, output_shape, target="cce"):
    """
    Upsampling for 4D inputs.

    Repeats the rows and columns of the data by height and width respectively.

    Args:
        inputs(akg.tvm.Tensor): 4D tensor.
        output_shape(list, tuple): Specifies the shape of output tensor, should be a 4D shape.

    Returns:
        akg.tvm.Tensor, has the same type as inputs and is shaped by output_shape.
    """
    inputs_shape = [x.value for x in inputs.shape]
    if len(inputs_shape) != 4:
        raise RuntimeError("Input data only supports 4-dim (NHWC) shape.")
    if len(output_shape) != 4:
        raise RuntimeError("Output data only supports 4-dim (NHWC) shape.")
    if inputs_shape[0] != output_shape[0]:
        raise ValueError("batch size of input and output must be equal")
    if inputs_shape[3] != output_shape[3]:
        raise ValueError("channel size of input and output must be equal")
    for i in range(1, 3):
        if output_shape[i] < inputs_shape[i]:
            raise ValueError("The length in output_shape is less than input_shape.")
        if output_shape[i] % inputs_shape[i] != 0:
            raise ValueError("The upsampling scale is not an integer.")

    scale = [int(output_shape[i] / inputs_shape[i]) for i in range(1, 3)]
    scale = [akg.tvm.convert(s) for s in scale]
    res = compute_upsampling(inputs, *tuple(scale))
    return res
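A NumPy sketch of the repeat semantics described in the docstring, assuming NHWC layout and integer scales; the shapes and scales are illustrative only.

import numpy as np

x = np.arange(1 * 2 * 2 * 1).reshape(1, 2, 2, 1)      # NHWC input
up = np.repeat(np.repeat(x, 2, axis=1), 3, axis=2)    # scale H by 2, W by 3
assert up.shape == (1, 4, 6, 1)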
Python
def Equal(input1, input2, target=utils.CCE):
    """
    Check whether input1 equals input2.

    Args:
        input1 (tvm.tensor.Tensor): Tensor.
        input2 (tvm.tensor.Tensor): Tensor.

    Returns:
        tvm.tensor.Tensor. If input1 equals input2, return True; otherwise return False.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    if target == utils.CCE:
        return _equal_ascend(input1, input2)
    else:
        return _equal(input1, input2)
Python
def softmax_op(data, axis, shape):
    """core computation of softmax op."""
    max_data = akg.lang.ascend.reduce_max(data, axis=axis, keepdims=True)
    max_broadcast = akg.lang.ascend.broadcast(max_data, shape)
    data_sub = akg.lang.ascend.vsub(data, max_broadcast)
    if data.dtype == "float32" and product_is_mini():
        data16 = akg.topi.cast(data_sub, "float16")
        data_exp = akg.lang.ascend.vexp(data16)
        data_exp = akg.topi.cast(data_exp, "float32")
    else:
        data_exp = akg.lang.ascend.vexp(data_sub)
    data_expsum = akg.lang.ascend.sum(data_exp, axis, keepdims=True)
    data_expsum_broadcast = akg.lang.ascend.broadcast(data_expsum, shape)
    output = data_exp / data_expsum_broadcast
    return output
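The max-subtraction above is the standard numerical-stability trick; a NumPy reference of the same definition, with illustrative inputs:

import numpy as np

def softmax_reference(x, axis=-1):
    # Subtracting the per-axis max keeps exp() from overflowing without
    # changing the result, since softmax is shift-invariant.
    shifted = x - np.max(x, axis=axis, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=axis, keepdims=True)

x = np.array([[1000.0, 1001.0, 1002.0]])
print(softmax_reference(x))   # finite values that sum to 1 along the last axis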
Python
def schedule_injective_from_existing(sch, out, tmp_out, fork_node, fake_out,
                                     grid_dims=0, block_dims=0,
                                     autotune=False, buffer_stitch=False):
    """Schedule for injective op from existing schedule.

    Parameters
    ----------
    sch: Schedule
        The schedule to update.
    out: Tensor
        The tensor representing the injective op.
    tmp_out: List of Tensor
        The tensors which are outputs but also serve as intermediate results
        in the computation.
    fork_node: List of Tensor
        The tensors which are fork nodes in the computation.
    fake_out: bool
        Indicate whether the out tensor is fake or not.
    grid_dims: int, optional
        Maximum number of blocks; 0 means use the default of 256.
    block_dims: int, optional
        Number of threads per block; 0 means use the target's max_num_threads.
    autotune: bool, optional
        Whether to pick the thread count from an autotvm knob.
    buffer_stitch: bool, optional
        Whether to split for buffer stitching.

    Returns
    -------
    sch: Schedule
        The updated schedule.
    """
    fused = sch[out].fuse(*sch[out].op.axis)
    kernel_scope, fused = sch[out].split(fused, nparts=1)

    if autotune:
        cfg = autotvm.get_config()
        cfg.define_knob("tile_x", [4, 8, 16, 32, 64, 128, 256, 512, 1024])
        num_thread = cfg['tile_x'].val
        max_block = int(256 * 1024 / num_thread)
    else:
        num_thread = block_dims if block_dims else tvm.target.current_target(allow_none=False).max_num_threads
        max_block = grid_dims if grid_dims else 256

    try:
        const_size = util.get_const_int(util.prod(out.shape))
        need_block_split = const_size > max_block * num_thread
        num_per_thread = (const_size - 1) // (max_block * num_thread) + 1
    except ValueError:
        need_block_split = False

    if need_block_split:
        if not buffer_stitch:
            xo, xi = sch[out].split(fused, factor=num_thread * max_block)
            bx, tx = sch[out].split(xi, factor=num_thread)
            sch[out].reorder(bx, tx, xo)
            inner_most = xo
        else:
            bx, tx = sch[out].split(fused, nparts=max_block)
            xo, tx = sch[out].split(tx, nparts=num_per_thread)
            inner_most = xo
        sch[out].bind(bx, tvm.thread_axis("blockIdx.x"))
        sch[out].bind(tx, tvm.thread_axis("threadIdx.x"))
    else:
        bx, tx = sch[out].split(fused, factor=num_thread)
        inner_most = tx
        sch[out].bind(tx, tvm.thread_axis("threadIdx.x"))
        sch[out].bind(bx, tvm.thread_axis("blockIdx.x"))

    if fake_out:
        sch[out].pragma(kernel_scope, "fake_node", out.name)

    if fork_node:
        for op in fork_node:
            loc_op = sch.cache_write(op, "local")
            sch[loc_op].compute_at(sch[out], inner_most)

    if tmp_out:
        for op in tmp_out:
            sch[op].compute_at(sch[out], inner_most)
    return sch
Python
def schedule_injective(outs, grid_dims=0, block_dims=0, buffer_stitch=False):
    """Schedule for injective op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of the injective op in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs, tmp_out, fake_out, fork_node = pick_single_out(outs)
    s = tvm.create_schedule(outs[0].op)
    tvm.schedule.AutoInlineInjective(s)

    schedule_injective_from_existing(s, outs[0], tmp_out, fork_node, fake_out,
                                     grid_dims, block_dims, buffer_stitch=buffer_stitch)
    return s
Python
def schedule_injective_autotune(outs):
    """Schedule for injective op with autotuning.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of the injective op in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs, tmp_out, fake_out, fork_node = pick_single_out(outs)
    s = tvm.create_schedule(outs[0].op)
    tvm.schedule.AutoInlineInjective(s)

    schedule_injective_from_existing(s, outs[0], tmp_out, fork_node, fake_out,
                                     autotune=True)
    return s
Python
def div_no_nan(data_x, data_y, target=utils.CCE):
    """
    Returns 0 if the denominator is zero; otherwise behaves like Div.

    Args:
        data_x (tvm.tensor.Tensor): tensor with type int32/int8/uint8, float16/float32.
        data_y (tvm.tensor.Tensor): tensor with type int32/int8/uint8, float16/float32.

    Returns:
        tvm.tensor.Tensor.
    """
    dtype = data_x.dtype
    if dtype != data_y.dtype:
        raise TypeError("input dtype should be the same")
    utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.ALL_FLOAT,
                                  utils.DtypeForDavinci.INT8,
                                  utils.DtypeForDavinci.UINT8,
                                  utils.DtypeForDavinci.INT32])

    utils.check_shape(data_x.shape)
    utils.check_shape(data_y.shape)
    utils.auto_broadcast_check(data_x, data_y)

    # dtype for vsel and vcmp
    if product_is_mini():
        compute_dtype = "float16"
    else:
        compute_dtype = "float32"

    # div fp16 y returns 0 if y < 2^-12
    # div fp32 y returns 0 if y < 2^-64
    min_val = tvm.const(2**(-12) if product_is_mini() else 2**(-64), dtype=compute_dtype)

    tvm_one = tvm.const(1, dtype=compute_dtype)
    tvm_zero = tvm.const(0, dtype=compute_dtype)

    if not product_is_mini() and dtype == "float16":
        min_val = tvm.const(2**(-12), "float32")

    data_y_fp32 = akg.lang.ascend.cast_to(data_y, "float32")
    # avoid overflow when y > 2^15 is cast from fp32 to fp16 on mini
    clip_y_fp32 = akg.topi.clip(data_y_fp32, -1.0, 1.0)
    abs_clip_y_fp32 = Abs(clip_y_fp32, target)
    y_cmp = akg.lang.ascend.cast_to(abs_clip_y_fp32, compute_dtype)

    is_zero = tvm.compute(data_y.shape,
                          lambda *i: tvm.expr.Select(y_cmp(*i) < min_val, tvm_one, tvm_zero),
                          name="is_zero")

    # if fp32 y < 2^-24, cast(y, fp16) == 0; to find y in (2^-64, 2^-24):
    if product_is_mini() and dtype == "float32":
        is_zero = _refine_is_zero(is_zero, abs_clip_y_fp32)

    is_zero = akg.lang.ascend.cast_to(is_zero, "float32")
    not_zero = tvm.compute(data_y.shape,
                           lambda *i: (1 - is_zero(*i)).astype("float32"),
                           name="not_zero")

    # replace [x1 x2]/[y1 0] by [x1 0]/[y1 1]
    data_x = Mul(akg.lang.ascend.cast_to(data_x, "float32"), not_zero, target=target)
    data_y = akg.lang.ascend.cast_to(data_y, "float32") + is_zero
    res = Divide(data_x, data_y, target=target)

    if dtype in ("int8", "uint8", "int32"):
        res = akg.lang.ascend.floor(res)
        res = akg.lang.ascend.cast_to(res, dtype)
    else:
        res = akg.lang.ascend.cast_to(res, dtype)

    return res
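A NumPy sketch of the intended semantics (not of the Davinci lowering above): lanes with a zero denominator yield zero instead of inf/NaN. The inputs are illustrative.

import numpy as np

def div_no_nan_reference(x, y):
    # Write 0 where the denominator is 0; plain division elsewhere.
    out = np.zeros_like(x, dtype=np.float32)
    np.divide(x, y, out=out, where=(y != 0))
    return out

x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
y = np.array([2.0, 0.0, -3.0], dtype=np.float32)
print(div_no_nan_reference(x, y))   # [ 0.5  0.  -1. ]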
Python
def calculate_params(input_h, input_w, output_h, output_w, dtype):
    """calculate index parameters for bilinear interpolation"""
    # scale value is required to map from input space to output space
    height_scale = (input_h - 1.0) / (output_h - 1.0)
    width_scale = (input_w - 1.0) / (output_w - 1.0)
    height_scale = akg.tvm.const(height_scale, dtype=dtype)
    width_scale = akg.tvm.const(width_scale, dtype=dtype)

    # ys_lower, ys_upper and ys_lerp contain the bottom index, top index and
    # interpolation factor for each row position of the output matrix respectively
    float_y = akg.tvm.compute([output_h], lambda i: height_scale * i, name="float_y")
    ys_lower = akg.lang.ascend.floor(float_y)
    ys_upper = akg.lang.ascend.ceil(float_y)
    ys_upper_lerp = akg.tvm.compute([output_h], lambda i: float_y[i] - ys_lower[i],
                                    name="ys_upper_lerp")
    ys_lower_temp = akg.lang.ascend.vmuls(ys_upper_lerp, akg.tvm.const(-1.0, dtype))
    ys_lower_lerp = akg.lang.ascend.vadds(ys_lower_temp, akg.tvm.const(1.0, dtype))

    # xs_lower, xs_upper and xs_lerp contain the left index, right index and
    # interpolation factor for each column position of the output matrix respectively
    float_x = akg.tvm.compute([output_w], lambda i: width_scale * i, name="float_x")
    xs_lower = akg.lang.ascend.floor(float_x)
    xs_upper = akg.lang.ascend.ceil(float_x)
    xs_upper_lerp = akg.tvm.compute([output_w], lambda i: float_x[i] - xs_lower[i],
                                    name="xs_upper_lerp")
    xs_lower_temp = akg.lang.ascend.vmuls(xs_upper_lerp, akg.tvm.const(-1.0, dtype))
    xs_lower_lerp = akg.lang.ascend.vadds(xs_lower_temp, akg.tvm.const(1.0, dtype))

    return xs_lower, xs_lower_lerp, xs_upper, xs_upper_lerp, \
        ys_lower, ys_lower_lerp, ys_upper, ys_upper_lerp
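A 1-D NumPy sketch of how the lower/upper indices and lerp factors computed above combine into an interpolated value, assuming the same align-corners scale (size - 1) / (out_size - 1); the row data and output width are illustrative.

import numpy as np

def resize_1d_bilinear(row, out_w):
    in_w = row.shape[0]
    scale = (in_w - 1.0) / (out_w - 1.0)            # align-corners scale
    x = scale * np.arange(out_w)
    lower, upper = np.floor(x).astype(int), np.ceil(x).astype(int)
    upper_lerp = x - lower                          # weight of the right sample
    lower_lerp = 1.0 - upper_lerp                   # weight of the left sample
    return row[lower] * lower_lerp + row[upper] * upper_lerp

print(resize_1d_bilinear(np.array([0.0, 10.0, 20.0]), 5))   # [ 0.  5. 10. 15. 20.]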
Python
def resize_bilinear(input, output_shape):
    """
    Resize images using bilinear interpolation.

    Args:
        input (tvm.tensor.Tensor): 4-D tensor of type float16 or float32 (`"NHWC"`).
        output_shape (Union[tuple, list]): New size of the image, two integers `H` and `W`.

    Returns:
        tvm.tensor.Tensor, shape `(input.shape[0], output_shape[0], output_shape[1], input.shape[3])`,
        has the same type as `input`.
    """
    utils.check_shape(input, 4, "input")
    utils.check_shape(output_shape, 2, "output_shape")
    utils.ops_dtype_check(input.dtype, utils.DtypeForDavinci.ALL_FLOAT)

    inputs_shape = get_shape(input)
    dtype = input.dtype
    if inputs_shape[1:3] == list(output_shape):
        res = akg.tvm.compute(inputs_shape, lambda *i: input(*i), name="assign")
        return res

    # Get H, W from the input and output shapes
    input_h, input_w = inputs_shape[1:3]
    output_h, output_w = output_shape

    xs_lower, xs_lower_lerp, xs_upper, xs_upper_lerp, ys_lower, ys_lower_lerp, ys_upper, ys_upper_lerp = \
        calculate_params(input_h, input_w, output_h, output_w, dtype)

    newH = akg.tvm.const(output_h, "int32")
    newW = akg.tvm.const(output_w, "int32")

    resH1 = resizeH1(input, newH, ys_lower, ys_lower_lerp)
    resH2 = resizeH2(input, newH, ys_upper, ys_upper_lerp)
    resH = akg.lang.ascend.vadd(resH1, resH2)

    resW1 = resizeW1(resH, newW, xs_lower, xs_lower_lerp)
    resW2 = resizeW2(resH, newW, xs_upper, xs_upper_lerp)
    res = akg.lang.ascend.vadd(resW1, resW2)

    return res
Python
def reduce_logsumexp(data, axis=None, keepdims=False, target="cce"):
    """
    Compute `log(sum(exp(elements across dimensions of a tensor)))` of
    elements over a given axis or a list of axes of a tensor.

    Args:
        data (tvm.tensor.Tensor): Tensor of type float16.
        axis: The dimensions to reduce. Could be None (by default), int, list or tuple.
              If None, all dimensions will be reduced.
              If int or list, must be in the range [-len(data.shape), len(data.shape) - 1].
        keepdims: Boolean. If True, retains reduced dimensions with length 1.
                  False by default.

    Returns:
        tvm.tensor.Tensor, has the same type as data.
    """
    check_list = ["float16"]
    dtype = data.dtype
    if dtype not in check_list:
        raise RuntimeError("reduce_logsumexp_cce only supports %s while dtype is %s"
                           % (",".join(check_list), dtype))

    shape = [x.value for x in data.shape]
    utils.check_shape(shape)

    exp_ = vexp(data)
    sum_ = sum(exp_, axis=axis, keepdims=keepdims)
    res = vlog(sum_)
    return res
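A NumPy reference for the same reduction; note the direct exp-then-log form used by the kernel above can overflow in float16 for large inputs, whereas the max-shifted variant sketched here (an assumption, not the kernel's behavior) avoids that.

import numpy as np

def logsumexp_reference(x, axis=None, keepdims=False):
    # Shift by the max so exp() stays in range; the shift is added back
    # outside the log, which leaves the result unchanged mathematically.
    m = np.max(x, axis=axis, keepdims=True)
    out = np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True)) + m
    return out if keepdims else np.squeeze(out, axis=axis)

x = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
print(logsumexp_reference(x, axis=1))   # approx. [3.4076]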
Python
def _check_axis(axis, shape):
    """double check that the axes are valid."""
    shape_tmp = list(shape).copy()
    while shape_tmp[-1] == 1 and len(shape_tmp) > 1:
        shape_tmp.pop()
    if (len(shape_tmp) - 1) in axis or -1 in axis:
        raise RuntimeError("Do not support reverse on the last dimension!")
    for i in axis:
        if i not in range(-len(shape_tmp), len(shape_tmp)):
            raise ValueError("Axis is invalid!")
Python
def reverse(input_data, axis):
    """
    Reverse a tensor on some dimensions.

    Args:
        input_data (tvm.tensor.Tensor): Tensor of float16, float32 and int32.
        axis (Union[list, tuple, int]): The dimensions to reverse. Reversing the
            last dimension is not supported, so axis cannot be None.

    Returns:
        tvm.tensor.Tensor, has the same type and shape as input_data.
    """
    shape = get_shape(input_data)
    dtype = input_data.dtype
    # check dtype and shape
    utils.check_shape(shape)
    utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])

    # check axis
    shape_len = len(shape)
    if hasattr(axis, 'index'):
        axis = list(axis)
    if isinstance(axis, int):
        axis = [axis]
    utils.axis_check(shape_len, axis)
    _check_axis(axis, shape)

    # compute res
    res = reverse_compute(input_data, axis)
    return res
Python
def apply_gradient_descent_run(shape, dtype, attrs=None):
    """run function for dsl function apply_gradient_descent."""
    shapes = [shape, (1,), shape]
    dtypes = [dtype] * len(shapes)
    mod = utils.op_build_test(apply_gradient_descent, shapes, dtypes,
                              kernel_name='apply_gradient_descent', attrs=attrs)
    inputs, expect, args = gen_data(shape, dtype)
    output = utils.mod_launch(mod, args, outputs=(0,), expect=expect)
    rtol, atol = get_rtol_atol("apply_gradient_descent", dtype)
    result = compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)
    return inputs, output, expect, result
Python
def gen_data(shape, dtype):
    """Generate data for testing the op."""
    var = random_gaussian(shape, miu=10, sigma=0.3).astype(dtype)
    alpha = random_gaussian((1,), miu=3, sigma=0.3).astype(dtype)
    delta = random_gaussian(shape, miu=4, sigma=0.3).astype(dtype)
    inputs = [var, alpha, delta]
    # expected gradient descent update: var_new = var - alpha * delta
    expect = var - alpha * delta
    args = inputs
    return inputs, expect, args
Python
def cosh_call(x):
    """Compute cosh by the call method."""
    dtype = x.dtype
    shape = get_shape(x)
    # in order to get a precise calculation result
    if product_is_mini() and dtype == "float32":
        x = akg.lang.ascend.cast_to(x, "float16")

    res = akg.tvm.compute(shape, lambda *indice: akg.lang.ascend.cosh(x(*indice)), name="res")

    if product_is_mini() and dtype == "float32":
        res = akg.lang.ascend.cast_to(res, "float32")

    return res, get_attrs()
Python
def Cosh(data, target=utils.CCE):
    """
    cosh op for input tensor.

    :math:`y = (e^x + e^{-x}) / 2`

    Args:
        data (tvm.tensor.Tensor): tensor with type float16 or float32.

    Returns:
        tvm.tensor.Tensor.

    Supported Platforms:
        'Ascend'
    """
    dtype = data.dtype
    utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)
    utils.check_shape(data.shape)

    return cosh_call(data)
Python
def find_cuda_path():
    """Utility function to find cuda path.

    Returns
    -------
    path : str
        Path to cuda root.
    """
    if "CUDA_PATH" in os.environ:
        return os.environ["CUDA_PATH"]
    cmd = ["which", "nvcc"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    (out, _) = proc.communicate()
    out = py_str(out)
    if proc.returncode == 0:
        return os.path.realpath(os.path.join(str(out).strip(), "../.."))
    cuda_path = "/usr/local/cuda"
    if os.path.exists(os.path.join(cuda_path, "bin/nvcc")):
        return cuda_path
    raise RuntimeError("Cannot find cuda path")
Python
def parse_compute_version(compute_version):
    """Parse compute capability string to divide major and minor version.

    Parameters
    ----------
    compute_version : str
        compute capability of a GPU (e.g. "6.0")

    Returns
    -------
    major : int
        major version number
    minor : int
        minor version number
    """
    split_ver = compute_version.split('.')
    try:
        major = int(split_ver[0])
        minor = int(split_ver[1])
        return major, minor
    except (IndexError, ValueError) as err:
        raise RuntimeError("Compute version parsing error: " + str(err))
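A quick usage example; the version string is illustrative.

major, minor = parse_compute_version("7.5")
assert (major, minor) == (7, 5)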
Python
def have_fp16(compute_version):
    """Whether fp16 support is provided in the given compute capability.

    Parameters
    ----------
    compute_version : str
        compute capability of a GPU (e.g. "6.0")
    """
    major, minor = parse_compute_version(compute_version)
    # fp16 support in reference to:
    # https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions
    if major == 5 and minor == 3:
        return True
    # NOTE: exclude compute capability 6.1 devices although they can actually
    # compute fp16, because these devices only have low-rate fp16 performance.
    if major == 6 and minor != 1:
        return True
    if major == 7:
        return True
    return False
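A few illustrative checks, derived directly from the branches above:

assert have_fp16("6.0") is True    # full-rate fp16
assert have_fp16("6.1") is False   # excluded: low-rate fp16 only
assert have_fp16("7.0") is True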
Python
def have_tensorcore(compute_version):
    """Whether TensorCore support is provided in the given compute capability.

    Parameters
    ----------
    compute_version : str
        compute capability of a GPU (e.g. "7.0")
    """
    major, _ = parse_compute_version(compute_version)
    if major == 7:
        return True
    return False
Python
def matrix_diag(data, out_shape):
    """
    Generate a batched tensor whose values on the diagonal lines are defined by `data`.

    Args:
        data (tvm.tensor.Tensor): A tensor of type float16, float32 or int32. Rank is L.
        out_shape (Union[list, tuple]): Output shape of length L + 1.
            The value of `out_shape[0, ..., L-1]` should be equal to `data.shape[0, ..., L-1]`.

    Returns:
        tvm.tensor.Tensor, has the same type as `data`, with shape `out_shape`.
    """
    dtype = data.dtype
    utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])

    shape = get_shape(data)
    utils.check_shape(data)
    utils.check_shape(out_shape, length=len(shape) + 1)
    if tuple(shape[:-1]) != tuple(out_shape[:-2]):
        raise RuntimeError("The value of out_shape[:-2] should be equal to data.shape[:-1]")

    res = akg.tvm.compute(out_shape,
                          lambda *i: akg.tvm.if_then_else(
                              akg.tvm.all(i[-1] == i[-2], i[-1] < shape[-1]),
                              data(*i[:-1]),
                              zero_const(dtype)),
                          name="diag")

    return res
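A NumPy sketch of the same semantics for a single (non-batched) vector, assuming the output diagonal may be longer than the input so the extra entries stay zero; the shapes are illustrative.

import numpy as np

def matrix_diag_reference(vec, out_shape):
    out = np.zeros(out_shape, dtype=vec.dtype)
    n = min(vec.shape[-1], out_shape[-1], out_shape[-2])
    out[..., np.arange(n), np.arange(n)] = vec[..., :n]
    return out

print(matrix_diag_reference(np.array([1, 2, 3]), (4, 4)))
# places 1, 2, 3 on the diagonal of a 4x4 zero matrix; the last entry stays 0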
Python
def upload(self, data, target=None):
    """Upload file to remote runtime temp folder.

    Parameters
    ----------
    data : str or bytearray
        The local file name or binary data to upload.

    target : str, optional
        The path on the remote.
    """
    if isinstance(data, bytearray):
        if not target:
            raise ValueError("target must be present when data is a bytearray")
        blob = data
    else:
        blob = bytearray(open(data, "rb").read())
        if not target:
            target = os.path.basename(data)

    if "upload" not in self._remote_funcs:
        self._remote_funcs["upload"] = self.get_function(
            "tvm.rpc.server.upload")
    self._remote_funcs["upload"](target, blob)
Python
def download(self, path):
    """Download file from remote temp folder.

    Parameters
    ----------
    path : str
        The relative location to remote temp folder.

    Returns
    -------
    blob : bytearray
        The result blob from the file.
    """
    if "download" not in self._remote_funcs:
        self._remote_funcs["download"] = self.get_function(
            "tvm.rpc.server.download")
    return self._remote_funcs["download"](path)
Python
def remove(self, path):
    """Remove file from remote temp folder.

    Parameters
    ----------
    path : str
        The relative location to remote temp folder.
    """
    if "remove" not in self._remote_funcs:
        self._remote_funcs["remove"] = self.get_function(
            "tvm.rpc.server.remove")
    self._remote_funcs["remove"](path)
Python
def load_module(self, path):
    """Load a remote module; the file needs to be uploaded first.

    Parameters
    ----------
    path : str
        The relative location to remote temp folder.

    Returns
    -------
    m : Module
        The remote module containing the remote function.
    """
    return base._LoadRemoteModule(self._sess, path)
Python
def summary(self):
    """Get the summary dict of the tracker."""
    base.sendjson(self._sock, [base.TrackerCode.SUMMARY])
    value = base.recvjson(self._sock)
    if value[0] != base.TrackerCode.SUCCESS:
        raise RuntimeError("Invalid return value %s" % str(value))
    return value[1]
Python
def text_summary(self):
    """Get a text summary of the tracker."""
    data = self.summary()
    total_ct = {}

    res = ""
    res += "Server List\n"
    res += "----------------------------\n"
    res += "server-address\tkey\n"
    res += "----------------------------\n"
    for item in data["server_info"]:
        addr = item["addr"]
        res += addr[0] + ":" + str(addr[1]) + "\t"
        res += item["key"] + "\n"
        key = item['key'].split(':')[1]  # 'server:rasp3b' -> 'rasp3b'
        if key not in total_ct:
            total_ct[key] = 0
        total_ct[key] += 1
    res += "----------------------------\n"
    res += "\n"

    # compute max length of device key
    queue_info = data['queue_info']
    keys = list(queue_info.keys())
    if keys:
        keys.sort()
        max_key_len = max([len(k) for k in keys])
    else:
        max_key_len = 0

    res += "Queue Status\n"
    title = ("%%-%ds" % max_key_len + " total free pending\n") % 'key'
    separate_line = '-' * len(title) + '\n'
    res += separate_line + title + separate_line
    for k in keys:
        total = total_ct.get(k, 0)
        free, pending = queue_info[k]["free"], queue_info[k]["pending"]
        if total or pending:
            res += ("%%-%ds" % max_key_len + " %-5d %-4d %-7d\n") % \
                   (k, total, free, pending)
    res += separate_line
    return res
Python
def request(self, key, priority=1, session_timeout=0, max_retry=5):
    """Request a new connection from the tracker.

    Parameters
    ----------
    key : str
        The type key of the device.

    priority : int, optional
        The priority of the request.

    session_timeout : float, optional
        The duration of the session, allows server to kill
        the connection when duration is longer than this value.
        When duration is zero, it means the request must always be kept alive.

    max_retry : int, optional
        Maximum number of times to retry before giving up.
    """
    last_err = None
    for _ in range(max_retry):
        try:
            if self._sock is None:
                self._connect()
            base.sendjson(self._sock,
                          [base.TrackerCode.REQUEST, key, "", priority])
            value = base.recvjson(self._sock)
            if value[0] != base.TrackerCode.SUCCESS:
                raise RuntimeError("Invalid return value %s" % str(value))
            url, port, matchkey = value[1]
            return connect(url, port, matchkey, session_timeout)
        except socket.error as err:
            self.close()
            last_err = err
        except TVMError as err:
            last_err = err
    raise RuntimeError(
        "Cannot request %s after %d retry, last_error:%s" % (
            key, max_retry, str(last_err)))
Python
def request_and_run(self, key, func, priority=1, session_timeout=0, max_retry=2):
    """Request a resource from tracker and run the func.

    This function safeguards against rare server node dropout during execution.
    In such a case, a new resource will be requested and func will be run again.

    Parameters
    ----------
    key : str
        The type key of the device.

    func : function of session -> value
        A stateless function

    priority : int, optional
        The priority of the request.

    session_timeout : float, optional
        The duration of the session, allows server to kill
        the connection when duration is longer than this value.
        When duration is zero, it means the request must always be kept alive.

    max_retry : int, optional
        Maximum number of times to retry the function before giving up.
    """
    last_err = None
    for _ in range(max_retry):
        try:
            sess = self.request(key,
                                priority=priority,
                                session_timeout=session_timeout)
            tstart = time.time()
            return func(sess)
        except TVMError as err:
            duration = time.time() - tstart
            # roughly estimate if the error is due to timeout termination
            if session_timeout and duration >= session_timeout * 0.95:
                raise RuntimeError(
                    "Session timeout when running %s" % func.__name__)
            last_err = err
    raise RuntimeError(
        "Failed to run on %s after %d retry, last_error:%s" % (
            key, max_retry, str(last_err)))
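# A hedged usage sketch of the request_and_run pattern above; the tracker
# handle, device key "rasp3b", and module name "mylib.so" are hypothetical and
# assume a running tracker with a matching server registered.
def _example_request_and_run(tracker):
    def run_on_device(sess):
        # Stateless callback: receives the RPCSession chosen by the tracker.
        return sess.load_module("mylib.so")
    return tracker.request_and_run("rasp3b", run_on_device, priority=1)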
Python
def connect(url, port, key="", session_timeout=0):
    """Connect to RPC Server

    Parameters
    ----------
    url : str
        The url of the host

    port : int
        The port to connect to

    key : str, optional
        Additional key to match server

    session_timeout : float, optional
        The duration of the session, allows server to kill
        the connection when duration is longer than this value.
        When duration is zero, it means the request must always be kept alive.

    Returns
    -------
    sess : RPCSession
        The connected session.
    """
    try:
        if session_timeout:
            key += " -timeout=%s" % str(session_timeout)
        sess = base._Connect(url, port, key)
    except NameError:
        raise RuntimeError("Please compile with USE_RPC=1")
    return RPCSession(sess)
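# A hedged usage sketch tying the RPC client pieces above together; the address,
# port and file names are hypothetical and assume a reachable RPC server plus a
# shared library already built for the remote device.
def _example_rpc_session():
    sess = connect("192.168.0.10", 9090)
    sess.upload("/tmp/mylib.so")        # push the binary to the remote temp folder
    mod = sess.load_module("mylib.so")  # load it on the remote side
    blob = sess.download("result.bin")  # fetch a produced artifact back
    sess.remove("mylib.so")             # clean up the remote temp folder
    return mod, blob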
Python
def dump_cuda_meta(code, ptx, thread_info, workspace=None):
    """
    Function for dumping cuda meta.

    Args:
        code: gpu code.
        ptx: ptx code.
        thread_info: thread info, written to json file.
        workspace: workspace info, which will be allocated in global memory.
    """
    title_dict = dict()

    # kernel name
    kernel_name = code.split("_kernel")[0].split(" ")[-1]
    title_dict["kernelName"] = kernel_name + "_kernel0"

    # sha256 of ptx
    sha256 = hashlib.sha256()
    sha256.update(ptx.encode("utf-8"))
    hash_str = sha256.hexdigest()
    title_dict["sha256"] = hash_str

    # thread info
    thread_info_dict = {
        "blockIdx.x": 1,
        "blockIdx.y": 1,
        "blockIdx.z": 1,
        "threadIdx.x": 1,
        "threadIdx.y": 1,
        "threadIdx.z": 1
    }
    for thread_tag in thread_info_dict.keys():
        if thread_tag in thread_info:
            if isinstance(thread_info[thread_tag], int):
                thread_info_dict[thread_tag] = thread_info[thread_tag]
            elif isinstance(thread_info[thread_tag], akg.tvm.expr.IntImm):
                thread_info_dict[thread_tag] = thread_info[thread_tag].value
    title_dict.update(thread_info_dict)

    # workspace
    workspace_dict = parse_workspace(workspace)
    if workspace_dict is not None:
        title_dict["workspace"] = workspace_dict

    meta_path = get_kernel_meta_path()
    cuda_path = os.path.realpath(meta_path)
    if not os.path.isdir(cuda_path):
        os.makedirs(cuda_path, exist_ok=True)

    # save ptx file to cuda meta
    ptx_file = os.path.realpath(meta_path + kernel_name + ".ptx")
    if os.path.exists(ptx_file):
        os.remove(ptx_file)
    with open(ptx_file, "at") as f:
        fcntl.flock(f.fileno(), fcntl.LOCK_EX)
        f.seek(0, 2)
        if f.tell() == 0:
            f.write(ptx)

    # modify the file permission to 400
    os.chmod(ptx_file, 0o400)

    # save json file to cuda meta
    json_file = os.path.realpath(meta_path + kernel_name + ".json")
    write_code(title_dict, json_file)
Python
def Addn(data, target=utils.CCE):
    """
    Compute the element-wise sum of a list of tensors.

    Args:
        data (list[tvm.tensor.Tensor]): List of tensors of type float16, float32.

    Returns:
        tvm.tensor.Tensor, the element-wise sum of all input tensors.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    # check types
    dtype = data[0].dtype
    if target == utils.CCE:
        utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)

    res = data[0]
    for i in range(1, len(data)):
        utils.elemwise_dtype_check(res.dtype, data[i].dtype)
        utils.elemwise_shape_check(res.shape, data[i].shape)
    res = akg.topi.elemwise_sum(data)

    return res
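# Illustrative NumPy reference for the element-wise sum computed by Addn; the
# operator itself goes through akg.topi.elemwise_sum.
import numpy as np

def _addn_reference(tensors):
    return np.sum(np.stack(tensors), axis=0)

# _addn_reference([np.ones((2, 2)), np.full((2, 2), 2.0)]) -> all elements 3.0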
Python
def matmul(x, y, b, out_dtype, left_format="zZ", right_format="nZ", out_format="zN",
           transpose_x=False, transpose_y=False, has_bias=False, attrs=None):
    """
    Computes matrix multiplication x * y + b.

    Args:
        x: akg.tvm.Tensor of type int8, uint8, float16, float32, int32. Left matrix.
        y: akg.tvm.Tensor of same type as x. Right matrix.
        b: akg.tvm.Tensor of same type as x. Bias tensor.
        out_dtype: str. Data type of output tensor.
        left_format: str. Data format of left matrix. Supported data format list ["zZ", "nZ", "zN"].
        right_format: str. Data format of right matrix. Supported data format list ["zZ", "nZ", "zN"].
        out_format: str. Data format of output tensor. Supported data format list ["zZ", "nZ", "zN"].
        transpose_x: Boolean. Specifies whether x is transposed or not.
        transpose_y: Boolean. Specifies whether y is transposed or not.
        has_bias: Boolean. Specifies whether bias tensor exists or not.
        attrs: Dict. Used in matmul computation.

    Note:
        Before calling matmul, converting the 2D data to the Fractal format is needed.

    Returns:
        akg.tvm.Tensor with type out_dtype.
    """
    # vc_util.ops_dtype_check([x.dtype, y.dtype], vc_util.DtypeForDavinci.ALL_FLOAT)
    # shape_x = [shape_element.value for shape_element in x.shape]
    # vc_util.check_shape(shape_x)
    # shape_y = [shape_element.value for shape_element in y.shape]
    # vc_util.check_shape(shape_y)

    """
    m = akg.tvm.var("I2")
    n = akg.tvm.var("I1")
    k = akg.tvm.var("KO")
    x = akg.tvm.placeholder((1, m, k, 16, 16), name='A', dtype=x.dtype)
    y = akg.tvm.placeholder((1, k, n, 16, 16), name='B', dtype=y.dtype)
    # b = akg.tvm.placeholder((1, m, 16, n, 16), name='BIAS')
    """

    out = matmul4D_compute(x, y, b, out_dtype, left_format, right_format, out_format,
                           transpose_x, transpose_y, attrs)
    attr_map = {"pragma_rmselfdep": False,
                "enable_post_poly_loop_partition": False,
                "enable_multicore": False,
                "enable_isolate_loop": False,
                "enable_double_buffer": False}
    return out, attr_map
Python
def matmul_execute(shape_x, shape_y, bias, left_format, right_format, out_format,
                   adj_x, adj_y, dtype, out_dtype, kernel_name, attrs):
    '''
    There are four types of fractal format in Davinci core: zZ, zN, nZ, nN
    general matmul format
    left_trans: False right_trans False: zZ * nZ = zN
    left_trans: True right_trans False: nN * nZ = zN
    left_trans: False right_trans True : zZ * zN = zN
    left_trans: True right_trans True : nN * zN = zN

    Now we need to support: zN * nZ = zN
    use left_format to specify, left matrix data format
    use right_format to specify, right matrix data format
    '''
    batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)
    m = (m + 15) // 16 * 16
    n = (n + 15) // 16 * 16
    k = (k + 15) // 16 * 16
    shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(
        m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, out_format)
    mod = dynamic_matmul_compile(shape_x, shape_y, bias, left_format, right_format, out_format,
                                 adj_x, adj_y, dtype, out_dtype, kernel_name, attrs)
    # Generate data
    m_x, m_y, bench_mark, bias_data = matmul_data(batch_tuple, m, k, n, dtype, out_dtype, bias,
                                                  adj_x, adj_y, left_format, right_format, out_format)

    # mod launch
    output = np.full(out_shape, np.nan, out_dtype)
    if bias == 0:
        output = utils.mod_launch(mod, (m_x, m_y, output, 1, 1, 1, 1, 1, 1, 1, 1, 1),
                                  outputs=(2,), expect=bench_mark)
    elif bias == 1:
        output = utils.mod_launch(mod, (m_x, m_y, bias_data, output), expect=bench_mark)

    # compare result
    rtol, atol = get_rtol_atol("matmul", dtype)
    compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True)
    # compare_result = utils.result_compare(output, bench_mark, r_tol=5e-3)
    return (m_x, m_y), output, bench_mark, compare_result
Python
def FloorDiv(data1, data2, target=utils.CCE):
    """
    Calculate x/y, always returning an integer which is floored.

    Args:
        data1 (tvm.tensor.Tensor): Tensor of type float16, float32.
        data2 (tvm.tensor.Tensor): Tensor of type float16, float32.

    Returns:
        tvm.tensor.Tensor, has type of int32.

    Supported Platforms:
        'Ascend'
    """
    utils.ops_dtype_check([data1.dtype, data2.dtype], utils.DtypeForDavinci.ALL_FLOAT)
    shape1 = [x.value for x in data1.shape]
    utils.check_shape(shape1)
    shape2 = [x.value for x in data2.shape]
    utils.check_shape(shape2)

    if product_is_mini():
        rec = Reciprocal(data2, high_precision=True, target=target)
        res = data1 * rec
    else:
        res = akg.topi.divide(data1, data2)
    res = akg.lang.ascend.floor(res)
    return res
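# Illustrative NumPy reference for the floor-division semantics of FloorDiv;
# note that negative quotients round toward negative infinity.
import numpy as np

def _floordiv_reference(x, y):
    return np.floor(x / y).astype(np.int32)

# _floordiv_reference(np.array([7.0, -7.0]), np.array([2.0, 2.0])) -> [3, -4]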
Python
def _tan_2x_multi(input_x, times):
    """calculating tan x by calculating tan (x/2^times) and using double angle formula multiple times"""
    # Calculate tan (x/2^times)
    if input_x.dtype == FLOAT_16 and product_is_mini():
        input_x_divide = topi.multiply(input_x, tvm.const(1.0/(2.0**times), FLOAT_16))
        res = _tan_expand(input_x_divide)
    else:
        input_x_divide = topi.multiply(input_x, 1.0/(2.0**times))
        res = _tan_expand(input_x_divide)

    while times != 0:
        # using double angle formula: tan 2x = 2*tan x/(1-tan x*tan x)
        if input_x.dtype == FLOAT_16 and product_is_mini():
            res_numerator = topi.multiply(res, tvm.const(2.0, FLOAT_16))
            tanx_square = topi.multiply(res, res)
            res_denominator = topi.add(topi.multiply(tanx_square, tvm.const(-1.0, FLOAT_16)),
                                       tvm.const(1.0, FLOAT_16))
        else:
            res_numerator = topi.multiply(res, 2.0)
            tanx_square = topi.multiply(res, res)
            res_denominator = topi.add(topi.multiply(tanx_square, -1.0), 1.0)

        if product_is_mini():
            res = Mul(res_numerator, Reciprocal(res_denominator, target=utils.CCE), utils.CCE)
        else:
            res = divide.Divide(res_numerator, res_denominator, utils.CCE)
        times = times - 1
    return res
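# Illustrative NumPy check of the double angle formula used above,
# tan(2x) = 2*tan(x) / (1 - tan(x)**2), evaluated at an arbitrary point.
import numpy as np

def _check_tan_double_angle(x=0.3):
    return np.isclose(np.tan(2 * x), 2 * np.tan(x) / (1 - np.tan(x) ** 2))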
Python
def apply_adam_run(shape, dtype, use_nesterov=None, attrs=None):
    """run function for dsl function apply_adam."""
    scalar_shape = (1,)
    var_shape, m_shape, v_shape = [shape] * 3
    beta1_power_shape, beta2_power_shape, lr_shape, beta1_shape, beta2_shape, epsilon_shape = \
        [scalar_shape] * 6
    grad_shape = shape
    shapes = [var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
              beta1_shape, beta2_shape, epsilon_shape, grad_shape]
    dtypes = [dtype] * 10
    if use_nesterov is None:
        op_attrs = None
    else:
        op_attrs = [use_nesterov]
    mod = utils.op_build_test(apply_adam, shapes, dtypes, op_attrs, kernel_name='apply_adam', attrs=attrs)
    expects, (var, m, v, grad), (beta1_power, beta2_power, lr, beta1, beta2, epsilon) = \
        gen_data(dtype, shape, use_nesterov)
    outputs = utils.mod_launch(mod, (var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad),
                               outputs=(0, 1, 2))
    rtol, atol = get_rtol_atol("apply_adam", dtype)
    compare_result = list(map(lambda x, y: compare_tensor(x, y, rtol=rtol, atol=atol), outputs, expects))
    inputs = (var, m, v, grad)
    return inputs, outputs, expects, all(compare_result)
Python
def gen_data(dtype, shape, use_nesterov=False):
    """Generate data for testing the op"""
    # tensors
    var = random_gaussian(shape).astype(dtype)
    m = random_gaussian(shape).astype(dtype)
    v = np.abs(random_gaussian(shape).astype(dtype))
    grad = random_gaussian(shape).astype(dtype)
    tensors = [var, m, v, grad]

    # scalars
    lr = np.array([0.001], dtype)
    beta1 = np.array([0.9], dtype)
    beta2 = np.array([0.999], dtype)
    epsilon = np.array([1e-7], dtype)
    t = np.random.randint(1, 100, size=(1,))
    beta1_power = np.array([beta1 ** t], dtype)
    beta2_power = np.array([beta2 ** t], dtype)
    scalars = [beta1_power, beta2_power, lr, beta1, beta2, epsilon]

    # expects
    lr_coefficient = np.sqrt(1.0 - beta2_power) / (1.0 - beta1_power)
    lr_t = lr * lr_coefficient
    m_t = m + (1.0 - beta1) * (grad - m)
    v_t = v + (1.0 - beta2) * (grad * grad - v)
    v_t_sqrt = np.sqrt(v_t)
    if use_nesterov:
        var_t = var - (lr_t * (m_t * beta1 + (1.0 - beta1) * grad)) / (epsilon + v_t_sqrt)
    else:
        var_t = var - (lr_t * m_t) / (epsilon + v_t_sqrt)
    expects = [var_t, m_t, v_t]

    return expects, tensors, scalars
Python
def Abs(in_data, target=utils.CCE):
    """
    Compute absolute value of a tensor.

    Args:
        in_data (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.

    Returns:
        tvm.tensor.Tensor of same type and shape as data.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    utils.check_shape(in_data.shape)
    in_type = in_data.dtype
    if target == utils.CCE:
        utils.ops_dtype_check(in_type, utils.DtypeForDavinci.ALL_TYPES)
        need_cast_dtype = ["int8", "int32", "uint8"]
        if in_type in need_cast_dtype:
            in_data = akg.tvm.compute(in_data.shape,
                                      lambda *indice: in_data(*indice).astype("float16"),
                                      name='type_cast')
        output = akg.tvm.compute(in_data.shape,
                                 lambda *index: akg.tvm.abs(in_data(*index)),
                                 name='abs_value')
        if in_type in need_cast_dtype:
            output = akg.tvm.compute(in_data.shape,
                                     lambda *indice: output(*indice).astype(in_type),
                                     name='res')
    else:
        if in_type == 'float16':
            in_data = akg.topi.cast(in_data, 'float32')
        output = akg.topi.abs(in_data)
        if in_type == 'float16':
            output = akg.topi.cast(output, 'float16')
    return output
Python
def tanh_fdiff(head, inp):
    """
    To achieve higher precision, we define the derivative ourselves with a simplified calculation.

    .. math::
        \\frac{d_{tanh}}{d_x} = 4e^{-2x} / (1+2e^{-2x}+e^{-4x})
    """
    data_abs = akg.topi.abs(inp)
    dtype = inp.dtype
    exp_2 = data_abs * akg.tvm.const(-2.0, dtype)
    exp_4 = data_abs * akg.tvm.const(-4.0, dtype)
    exp_2_value = akg.topi.exp(exp_2)
    exp_4_value = akg.topi.exp(exp_4)
    exp_2_value_2 = exp_2_value * akg.tvm.const(2.0, dtype)
    exp_2_value_4 = exp_2_value * akg.tvm.const(4.0, dtype)
    sum_dino_exp = akg.tvm.const(1.0, dtype) + exp_2_value_2 + exp_4_value
    dep_tanh = exp_2_value_4 / sum_dino_exp
    res = akg.topi.multiply(head, dep_tanh)
    return res
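# Illustrative NumPy check of the identity used above: since d tanh(x)/dx is an
# even function, 4*exp(-2|x|) / (1 + 2*exp(-2|x|) + exp(-4|x|)) equals 1 - tanh(x)**2.
import numpy as np

def _check_tanh_derivative_identity():
    x = np.linspace(-4.0, 4.0, 9)
    ax = np.abs(x)
    lhs = 4 * np.exp(-2 * ax) / (1 + 2 * np.exp(-2 * ax) + np.exp(-4 * ax))
    return np.allclose(lhs, 1 - np.tanh(x) ** 2)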
Python
def gelu_ad(head, in_data, target="cce"):
    """Compute gradient for gelu operator using automatic differentiate."""
    res = gelu.gelu(in_data)
    jacs = list(akg.differentiate(res, [in_data], head))
    return jacs[0]
Python
def gelu_ad_custom(head, in_data, target="cce"):
    """
    Automatic differentiation of gelu with a customized function.

    To achieve higher precision, we can also define the derivative of the tanh part
    ourselves with a simplified calculation.
    """
    dtype = in_data.dtype
    const1 = akg.tvm.const(0.044715, dtype)
    const2 = akg.tvm.const(0.7978845, dtype)
    const3 = akg.tvm.const(0.1070322, dtype)
    tmp0 = akg.topi.multiply(in_data, in_data)
    pow0 = akg.topi.multiply(tmp0, in_data)
    mul0 = pow0 * const1
    add0 = in_data + mul0
    mul1 = add0 * const2
    tanh_res = Tanh(mul1)
    add1 = tanh_res + akg.tvm.const(1, dtype)
    mul2 = add1 * akg.tvm.const(0.5, dtype)
    mul3 = in_data * mul2
    res = mul3

    def gelu_diff(out, inp, head, ad_attrs, new_array_pld):
        temp = tanh_fdiff(head, mul1)
        return [temp * (akg.tvm.const(0.7978845, dtype) + const3 * inp[0] * inp[0])]

    jacs = list(akg.differentiate(res, [in_data], head, None, None,
                                  override={tanh_res: ([in_data], gelu_diff)}))
    return jacs[0]
Python
def avgpool_set_dim_func(a_value, kernel, stride, pad):
    """set dim info to attr with avgpool_set_dim_map"""
    key = []
    key.append(tuple(get_shape(a_value)))
    key.append(kernel)
    key.append(stride)
    if isinstance(pad, list):
        pad = tuple(pad)
    key.append(pad)
    key.append(a_value.dtype)
    hash_key = str(tuple(key))

    if hash_key in avgpool_set_dim_map.keys():
        return ct_util.set_dims(avgpool_set_dim_map[hash_key]), hash_key
    return "", hash_key
Python
def avg_pool_5d_hybrid(a_value, kernel, stride, strategy):
    """avgpool with 5d case via hybrid"""
    kernel_h, kernel_w = kernel
    stride_h, stride_w = stride
    shape = get_shape(a_value)
    batch_size, c1_, in_size_h, in_size_w, c0_ = shape
    dtype = a_value.dtype
    if len(shape) != 5:
        raise ValueError("Only support 5-dim pooling!")
    if len(kernel) != 2:
        raise ValueError("Only support 2-dim kernel!")

    [pad_height_head, _, pad_width_head, _], [out_size_h, out_size_w] = \
        cal_pad_shapes_by_strategy(shape, kernel, stride, strategy)

    avg_pre = akg.tvm.const(1.0000 / (kernel_w * kernel_h), dtype=dtype)
    zero = akg.tvm.const(0.0, dtype=dtype)

    @script(capture=locals())
    def avg_pool_hybrid(inputs, zero, avg_pre):
        output = output_tensor((batch_size, c1_, out_size_h, out_size_w, c0_), inputs.dtype)

        for n in range(batch_size):
            for c1 in range(c1_):
                # Head
                for ow in range(out_size_w):
                    for c0 in range(c0_):
                        output[n, c1, 0, ow, c0] = zero
                for ow in range(out_size_w):
                    for kh in range(kernel_h):
                        for kw in range(kernel_w):
                            for c0 in range(c0_):
                                if (kh >= pad_height_head) \
                                        and (ow * stride_w + kw - pad_width_head >= 0) \
                                        and (ow * stride_w + kw <= in_size_w + pad_width_head - 1):
                                    output[n, c1, 0, ow, c0] = output[n, c1, 0, ow, c0] +\
                                        inputs[n, c1, kh - pad_height_head,
                                               ow * stride_w + kw - pad_width_head, c0]
                                else:
                                    output[n, c1, 0, ow, c0] += zero
                for ow in range(out_size_w):
                    for c0 in range(c0_):
                        output[n, c1, 0, ow, c0] *= avg_pre
                # Tail
                for oh in range(out_size_h - 1):
                    for ow in range(out_size_w):
                        for c0 in range(c0_):
                            output[n, c1, oh + 1, ow, c0] = zero
                for oh in range(out_size_h - 1):
                    for ow in range(out_size_w):
                        for kh in range(kernel_h):
                            for kw in range(kernel_w):
                                for c0 in range(c0_):
                                    if ((oh + 1) * stride_h + kh <= in_size_h + pad_height_head - 1)\
                                            and (ow * stride_w + kw >= pad_width_head)\
                                            and (ow * stride_w + kw <= in_size_w + pad_width_head - 1):
                                        output[n, c1, oh + 1, ow, c0] = output[n, c1, oh + 1, ow, c0] +\
                                            inputs[n, c1, (oh + 1) * stride_h + kh - pad_height_head,
                                                   ow * stride_w + kw - pad_width_head, c0]
                                    else:
                                        output[n, c1, oh + 1, ow, c0] += zero
                for oh in range(out_size_h - 1):
                    for ow in range(out_size_w):
                        for c0 in range(c0_):
                            output[n, c1, oh + 1, ow, c0] *= avg_pre
        return output

    res_value = avg_pool_hybrid(a_value, zero, avg_pre)

    # set dim
    info = dim.Dim()
    # first part
    info.setdim(index=0, axis=0, tilel1=out_size_w, tilel0=0)  # ow
    info.setdim(index=0, axis=1, tilel1=c0_, tilel0=0)  # c0
    info.setdim(index=0, axis=2, tilel1=kernel_h, tilel0=0)  # kh
    # second part
    info.setdim(index=1, axis=0, tilel1=out_size_h - 1, tilel0=0)  # oh-1
    info.setdim(index=1, axis=1, tilel1=out_size_w, tilel0=0)  # ow
    info.setdim(index=1, axis=2, tilel1=c0_, tilel0=0)  # c0
    info.setdim(index=1, axis=3, tilel1=kernel_h, tilel0=0)  # kh
    info = str(info)

    attrs = {DIM: info}
    return res_value, attrs
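# Illustrative NumPy reference for plain average pooling on a single 2D slice
# (VALID padding, one channel): the arithmetic the hybrid kernel above performs
# for every (n, c1, c0) slice.
import numpy as np

def _avgpool_reference_2d(x, kernel=(2, 2), stride=(2, 2)):
    kh, kw = kernel
    sh, sw = stride
    oh, ow = (x.shape[0] - kh) // sh + 1, (x.shape[1] - kw) // sw + 1
    out = np.zeros((oh, ow), dtype=x.dtype)
    for i in range(oh):
        for j in range(ow):
            out[i, j] = x[i * sh:i * sh + kh, j * sw:j * sw + kw].mean()
    return out

# _avgpool_reference_2d(np.arange(16.).reshape(4, 4)) -> [[2.5, 4.5], [10.5, 12.5]]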
Python
def avgpool_with_img2col(data, kernel, stride, strategy):
    """
    Performs the avgpool with img2col.

    Note:
        Only support 5D format(NC1HWC0), and pooling will work on H and W.

    Args:
        data (tvm.tensor.Tensor): Tensor of type float16, float32.
        kernel (Union[list, tuple]): two int numbers for pooling window's size.
        stride (Union[list, tuple]): two int numbers for window's stride.
        strategy (Union[str, list, tuple]): padding, should be 'VALID', 'SAME' or
            instance of list(four int numbers, as 'CONSTANTS' strategy).
            Supported strategies are the same as avgpool.

    Returns:
        tvm.tensor.Tensor, result of average pooling.
    """
    shape = get_shape(data)
    dtype = data.dtype
    utils.davinci_format_check(shape, "NC1HWC0", dim=5)
    utils.ops_dtype_check(dtype, utils.DtypeForDavinci.FLOAT16)
    utils.check_shape(kernel, 2, "Kernel")
    utils.check_shape(stride, 2, "Stride")

    kernel_h, kernel_w = kernel
    in_n, in_c1, _, _, in_c0 = shape

    [ph_h, ph_t, pw_h, pw_t], [out_h, out_w] = \
        cal_pad_shapes_by_strategy(shape, kernel, stride, strategy)
    pad = [ph_h, ph_t, pw_h, pw_t]
    pad_value = zero_const(dtype)

    # fmap img2col l1 -> ub in zZ format by fractal
    fmap_img2col_shp_ub = (in_n, in_c1, kernel_h, kernel_w, out_h, out_w, in_c0)
    fmap_img2col_ub = img2col(data, fmap_img2col_shp_ub, kernel_h, kernel_w,
                              pad, stride, pad_value, tag="")

    out_shape = (in_n, in_c1, out_h, out_w, in_c0)
    reduce_axis_h = akg.tvm.reduce_axis((0, kernel_h), name="reduce_h")
    reduce_axis_w = akg.tvm.reduce_axis((0, kernel_w), name="reduce_w")
    res_sum = akg.tvm.compute(out_shape,
                              lambda n, c1, oh, ow, c0:
                              akg.tvm.sum(
                                  fmap_img2col_ub[n, c1, reduce_axis_h, reduce_axis_w, oh, ow, c0],
                                  axis=[reduce_axis_h, reduce_axis_w]),
                              name="pooling_avg")

    dividor = akg.tvm.const(kernel_h * kernel_w, dtype)
    output = akg.tvm.compute(out_shape, lambda *i: res_sum(*i) / dividor, name="res_value")
    return output
Python
def unsorted_segment_sum(tensor, segment_ids, num_segments, init_value=0):
    """
    Calculate segment sum, return a new tensor which is the sum along segments of a tensor.

    Only supports float16, int32.

    Args:
        tensor (tvm.tensor.Tensor): Input tensor
        segment_ids (list): Index of each segment
        num_segments (tvm.tensor.Tensor): Number of distinct segment ids
        init_value (int): Initial value

    Returns:
        tvm.tensor.Tensor, segment_sum(tensor, segment_ids)
    """
    return segment_op(tensor, segment_ids, num_segments, init_value, tensor.dtype, "segment_sum")
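# Illustrative NumPy reference for the unsorted segment sum semantics above:
# values with the same segment id are accumulated into one output row.
import numpy as np

def _segment_sum_reference(data, segment_ids, num_segments):
    expect = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
    for row, sid in zip(data, segment_ids):
        expect[sid] += row
    return expect

# _segment_sum_reference(np.array([1., 2., 3., 4.]), [0, 0, 1, 1], 3) -> [3., 7., 0.]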
Python
def unsorted_segment_mean(tensor, segment_ids, num_segments, init_value=0):
    """
    Calculate segment mean, return a new tensor which is the mean along segments of a tensor.

    Only supports float16, int32.

    Args:
        tensor (tvm.tensor.Tensor): Input tensor
        segment_ids (list): Index of each segment
        num_segments (tvm.tensor.Tensor): Number of distinct segment ids
        init_value (int): Initial value

    Returns:
        tvm.tensor.Tensor, segment_mean(tensor, segment_ids)
    """
    return segment_op(tensor, segment_ids, num_segments, init_value, tensor.dtype, "segment_mean")
Python
def unsorted_segment_prod(tensor, segment_ids, num_segments, init_value=0):
    """
    Calculate segment prod, return a new tensor which is the prod along segments of a tensor.

    Only supports float16, int32.

    Args:
        tensor (tvm.tensor.Tensor): Input tensor
        segment_ids (list): Index of each segment
        num_segments (tvm.tensor.Tensor): Number of distinct segment ids
        init_value (int): Initial value

    Returns:
        tvm.tensor.Tensor, segment_prod(tensor, segment_ids)
    """
    return segment_op(tensor, segment_ids, num_segments, init_value, tensor.dtype, "segment_prod")
Python
def unsorted_segment_min(tensor, segment_ids, num_segments, init_value=0):
    """
    Calculate segment min, return a new tensor which is the min along segments of a tensor.

    Only supports float16, int32.

    Args:
        tensor (tvm.tensor.Tensor): Input tensor
        segment_ids (list): Index of each segment.
        num_segments (tvm.tensor.Tensor): Number of distinct segment ids.
        init_value (int): Initial value.

    Returns:
        tvm.tensor.Tensor, segment_min(tensor, segment_ids).
    """
    return segment_op(tensor, segment_ids, num_segments, init_value, tensor.dtype, "segment_min")
Python
def unsorted_segment_max(tensor, segment_ids, num_segments, init_value=0):
    """
    Calculate segment max, return a new tensor which is the max along segments of a tensor.

    Only supports float16, int32.

    Args:
        tensor (tvm.tensor.Tensor): Input tensor.
        segment_ids (list): Index of each segment.
        num_segments (tvm.tensor.Tensor): Number of distinct segment ids.
        init_value (int): Initial value.

    Returns:
        tvm.tensor.Tensor, segment_max(tensor, segment_ids).
    """
    return segment_op(tensor, segment_ids, num_segments, init_value, tensor.dtype, "segment_max")
Python
def segment_compute(indices):
    """compute_func of the unsorted segment arithmetic operators"""
    unique_id = []
    for i in segment_ids:
        if i not in unique_id:
            unique_id.append(i)

    def compute_outer_dim(i):
        new_segment_id = list(segment_ids)[:]
        if i in unique_id:
            idx = new_segment_id.index(i)
            new_segment_id[idx] = -1
            tmp = tensor[(idx,) + indices[1:]].astype(output_dtype)
            for _ in range(segment_ids.count(i) - 1):
                new_segment_id[idx] = -1
                idx = new_segment_id.index(i)
                if op in ("segment_sum", "segment_mean"):
                    tmp = tensor[(idx,) + indices[1:]].astype(output_dtype) + tmp
                elif op == "segment_prod":
                    tmp = tensor[(idx,) + indices[1:]].astype(output_dtype) * tmp
                elif op == "segment_min":
                    tmp = akg.tvm.min(tensor[(idx,) + indices[1:]].astype(output_dtype), tmp)
                elif op == "segment_max":
                    tmp = akg.tvm.max(tensor[(idx,) + indices[1:]].astype(output_dtype), tmp)
                else:
                    raise RuntimeError("operation %s not support yet" % op)
            if op == "segment_mean":
                tmp = tmp // akg.tvm.const(segment_ids.count(i), output_dtype)
        else:
            tmp = akg.tvm.const(init_value, tensor.dtype)
        return tmp

    res = compute_outer_dim(0)
    for i in range(num_segments)[1:]:
        res = akg.tvm.select(indices[0] == i, compute_outer_dim(i), res)
    return res
Python
def fake_quant_with_min_max_vars_per_channel_gradient_compute(input_gradients, inputs_data,
                                                              min_broadcast, max_broadcast,
                                                              num_bits=8, narrow_range=False):
    """Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation."""
    shape = get_shape(inputs_data)
    sum_axis = [x for x in range(0, len(shape) - 1)]
    dtype = inputs_data.dtype

    nudged_min, nudged_max, _ = nudged_min_max_compute(min_broadcast, max_broadcast, num_bits, narrow_range)
    # both zero yields zero
    bool_both_zero_value = bool_both_zero_compute(min_broadcast, max_broadcast)
    bool_both_zero_negate = _bool_negate(bool_both_zero_value)

    bool_less_equal_nudged_max = _less_equal_compare_float32(inputs_data, nudged_max)
    bool_more_equal_nudged_min = _less_equal_compare_float32(nudged_min, inputs_data)
    bool_between_nudged_min_max = topi.multiply(bool_less_equal_nudged_max, bool_more_equal_nudged_min)
    # gradient is 1 if input in [min, max] else 0
    backprops_input_tmp = topi.multiply(bool_between_nudged_min_max, input_gradients)
    backprops_bool_both_zero = topi.multiply(backprops_input_tmp, bool_both_zero_value)
    # if min and max are both zero, gradients is input_gradients
    input_gradients_both_zero = topi.multiply(input_gradients, bool_both_zero_negate)
    backprops_input = topi.add(backprops_bool_both_zero, input_gradients_both_zero)

    # gradients for min is input_gradients if inputs_data < nudged_min else 0
    bool_less_nudged_min = _bool_negate(bool_more_equal_nudged_min)
    output_backprop_min_tmp = topi.multiply(bool_less_nudged_min, input_gradients)
    # gradients for min is 0 if min and max are both 0
    output_backprop_min_bool = topi.multiply(output_backprop_min_tmp, bool_both_zero_value)
    if sum_axis == []:
        output_backprop_min = output_backprop_min_bool
    else:
        output_backprop_min = topi.sum(output_backprop_min_bool, sum_axis)

    # gradients for max is input_gradients if inputs_data > nudged_max else 0
    bool_more_nudged_max = _bool_negate(bool_less_equal_nudged_max)
    output_backprop_max_tmp = topi.multiply(bool_more_nudged_max, input_gradients)
    # gradients for max is 0 if min and max are both 0
    output_backprop_max_bool = topi.multiply(output_backprop_max_tmp, bool_both_zero_value)
    if sum_axis == []:
        output_backprop_max = output_backprop_max_bool
    else:
        output_backprop_max = topi.sum(output_backprop_max_bool, sum_axis)
    return backprops_input, output_backprop_min, output_backprop_max
Python
def EqualCount(x, y, target=utils.CCE):
    """
    Compute the number of elements at which x and y are equal.

    Args:
        x (tvm.tensor.Tensor): Tensor of type int32.
        y (tvm.tensor.Tensor): Tensor of type int32.

    Returns:
        tvm.tensor.Tensor of type int32, containing the count of equal elements.

    Supported Platforms:
        'Ascend'
    """
    # check shapes
    shape1 = get_shape(x)
    shape2 = get_shape(y)
    shapes = [shape1, shape2]
    for _, shape_ in enumerate(shapes):
        utils.check_shape(shape_)
    if len(shape1) != 1 or len(shape2) != 1:
        raise RuntimeError("Both inputs must be one-dimensional!")

    # check types
    dtype = x.dtype
    utils.ops_dtype_check([x.dtype, y.dtype], utils.DtypeForDavinci.INT32)

    # Due to instruction limitations, the int32 data needs to be converted to
    # float16 or float32.
    # When int32 data is cast to float16 there may be overflow problems,
    # so the int32 data is cast to float32 whenever possible.
    orig_dtype = dtype
    if product_is_mini():
        dtype = "float16"
    else:
        dtype = "float32"
    x = Cast(x, dtype, target)
    y = Cast(y, dtype, target)

    shape1, shape2, shape = produce_shapes(shape1, shape2)
    t = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(1, dtype), "t")
    f = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(0, dtype), "f")
    x = akg.topi.broadcast_to(x, shape)
    y = akg.topi.broadcast_to(y, shape)
    z = akg.tvm.compute(shape,
                        lambda *indice: akg.tvm.expr.Select(
                            x[indice] == y[indice], t[indice], f[indice]),
                        name="z")
    res = Sum(z, target=target)
    if res.dtype != orig_dtype:
        res = Cast(res, orig_dtype, target)
    return res
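The semantics reduce to counting matching positions; a NumPy equivalent for two 1-D int32 vectors (the kernel above routes through float16/float32 only because of instruction limitations), using assumed sample values:

import numpy as np

a = np.array([1, 2, 3, 2], dtype=np.int32)
b = np.array([1, 0, 3, 3], dtype=np.int32)
equal_count = np.sum(a == b).astype(np.int32)  # -> 2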
Python
def fusion_gen_data(fm_shape, filter_shape, dtype, pad, stride, dilation, use_bias, momentum=0.9, eps=1e-3, has_add=False, has_relu=False): """Generate datas. Generate input datas, calculate expect results, and generate output_buffers. Args: fm_shape: Shape of convolution's input. filter_shape: Shape of convolution's filter. dtype: Data type of convolution's data. pad: list of 4 ints for convolution's pad parameters. stride: list of 2 insts for convolution's stride parameters. dilation: list of 2 ints for convolution's dilation parameters. use_bias: Whether convolution should consider bias. momentum: Momentum for moving average. eps: A small value for avoiding divide zero. other_branch_shape: Shape of data that comes from other branch and will be added later. has_add: Whether this fusion function has add operator. has_relu: Whether this fusion function has relu operator. Returns: inputs: A tuple contain all generated input data. output_buffers: A tuple contain all generated output buffer. expects: A tuple contain expect results. """ block_size = 16 conv_in_shapes, conv_in_dtypes, _, shapes_4d, bn_shape = \ get_convbn1_compile_param(fm_shape, filter_shape, dtype, pad, stride, dilation, use_bias) mid_shape = (1, bn_shape[1], 1, 1, bn_shape[4]) mid_dtype = "float32" x_4d, filter_conv, bias = gen_convbn1_inputs( conv_in_shapes, conv_in_dtypes, shapes_4d, use_bias) gamma, beta, running_mean, running_var_tmp = \ gen_inputs_directly([mid_shape] * 4, [mid_dtype] * 4) running_var = abs(running_var_tmp) inputs = [] inputs_conv_bn1 = [] in_n, in_channel, in_h, in_w = x_4d.shape inputs_conv_bn1.append(x_4d.reshape( in_n, in_channel // block_size, block_size, in_h, in_w).transpose( 0, 1, 3, 4, 2).copy()) weight_n, weight_channel, weight_h, weight_w = filter_conv.shape inputs_conv_bn1.append(( filter_conv.reshape( weight_n, weight_channel // block_size, block_size, weight_h, weight_w).transpose(1, 3, 4, 0, 2).copy() ).reshape( weight_channel // block_size * weight_h * weight_w, weight_n // block_size, block_size, block_size)) if use_bias: bias_n = bias.shape[0] inputs_conv_bn1.append( bias.reshape(1, bias_n // block_size, 1, 1, block_size)) inputs.append(inputs_conv_bn1) if has_add: ob_data = gen_inputs_directly([bn_shape], [dtype]) inputs_bn2_fusion = (ob_data, gamma, beta, running_mean, running_var) to_pass_ins = (x_4d, filter_conv, bias, gamma, beta, running_mean, running_var, ob_data) else: inputs_bn2_fusion = (gamma, beta, running_mean, running_var) to_pass_ins = (x_4d, filter_conv, bias, gamma, beta, running_mean, running_var, None) inputs.append(inputs_bn2_fusion) expects = benchmark(*to_pass_ins, pad, stride, dilation, momentum, eps, has_add, has_relu) output_buffers = [] output_buffers.append(tuple( [np.full(expects[0].shape, 0.0, dtype)] + [np.full(mid_shape, 0.0, mid_dtype)] * 2)) output_buffers.append(tuple( [np.full(mid_shape, 0.0, mid_dtype)] * 3)) output_buffers.append(malloc_out_buffer(expects[1])) return inputs, tuple(output_buffers), expects
Python
def conv_bn_fusion_run(fm_shape, filter_shape, dtype, pad, stride, dilation, use_bias=False, momentum=0.9, eps=1e-3, attrs=None): """test run function for conv bn fusion""" ########################################################################### # compile each kernel ########################################################################### conv_in_shapes, conv_in_dtypes, conv_op_attrs, _, shape = \ get_convbn1_compile_param( fm_shape, filter_shape, dtype, pad, stride, dilation, use_bias) # conv + bn1 + bn2 + bn3 # conv mod_conv = utils.op_build_test(Conv, [conv_in_shapes], ['float16'], op_attrs=conv_op_attrs, kernel_name="conv_whole", attrs=attrs.copy()) in_shapes_bn1, in_dtypes_bn1 = get_bn_split_param(shape, dtype, 1) mod_bn1 = utils.op_build_test(FusedBn1, in_shapes_bn1, in_dtypes_bn1, kernel_name="fused_bn1_whole", attrs=attrs.copy()) in_shapes_bn2, in_dtypes_bn2 = get_bn_split_param(shape, dtype, 2) mod_bn2 = utils.op_build_test(FusedBn2, in_shapes_bn2, in_dtypes_bn2, op_attrs=[momentum], kernel_name="fused_bn2_whole", attrs=attrs.copy()) in_shapes_bn3, in_dtypes_bn3 = get_bn_split_param(shape, dtype, 3) mod_bn3 = utils.op_build_test(FusedBn3, in_shapes_bn3, in_dtypes_bn3, op_attrs=[eps], kernel_name="fused_bn3_whole", attrs=attrs.copy()) ########################################################################### # following run the kernel ########################################################################### inputs, output_buffers, expects = \ fusion_gen_data(fm_shape, filter_shape, dtype, pad, stride, dilation, use_bias, momentum, eps, False, False) inplace_binds = ((2, 1), (3, 2)) output_places = list(range(-len(output_buffers[1]), 0)) if inplace_binds is not None: for bind in inplace_binds: output_places[bind[1]] = bind[0] # origin run conv_out = utils.mod_launch(mod_conv, [*inputs[0], output_buffers[0][0]], expect=expects) bn1_out_buffers = tuple( [np.full([shape[0], shape[1], 1, 1, shape[4]], 0.0, "float32")] * 2) bn1_outs = utils.mod_launch(mod_bn1, [conv_out, *bn1_out_buffers], outputs=list(range(-len(bn1_out_buffers), 0))) bn2_out_buffers = tuple( [np.full([1, shape[1], 1, 1, shape[4]], 0.0, "float32")] * 4) bn2_inplace_binds = ((2, 2), (3, 3)) output_places_bn2 = list(range(-len(bn2_out_buffers), 0)) if bn2_inplace_binds is not None: for bind in bn2_inplace_binds: output_places_bn2[bind[1]] = bind[0] bn2_outs = utils.mod_launch(mod_bn2, [bn1_outs[0], bn1_outs[1], *inputs[1][2:], *bn2_out_buffers], outputs=output_places_bn2, expect=expects) bn3_outs = utils.mod_launch(mod_bn3, [conv_out, bn2_outs[0], bn2_outs[1], *inputs[1][:2], *output_buffers[2]], outputs=list(range(-len(output_buffers[2]), 0)), expect=expects) origin_outputs = (conv_out, bn3_outs, bn2_outs[2], bn2_outs[3], bn2_outs[0], bn2_outs[1]) cmp_res_origin = compare_result(origin_outputs, expects, dtype) return inputs, origin_outputs, expects, all(cmp_res_origin)
Python
def Argmax(data, axis, target=utils.CCE):
    """
    Calculate the indices of the maximum values along the specified axis.

    Args:
        data (tvm.tensor.Tensor): Input tensor.
        axis (int): Specifies which axis to reduce.

    Returns:
        Tensor containing the indices of the maximum values along the axis.

    Supported Platforms:
        'Ascend'
    """
    res, attrs = common(data, axis, "max")
    return res, attrs
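For reference, the expected behaviour matches NumPy's argmax along the chosen axis; a small sketch with assumed values:

import numpy as np

data = np.array([[1, 5, 2],
                 [7, 0, 3]], dtype=np.float16)
np.argmax(data, axis=1)  # -> [1, 0]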
Python
def AtanGrad(head, input_x, target=utils.CCE): """ Compute gradient of input_x in atan. .. math:: dx = \\frac{1}{1 + x^2} \\cdot dy Args: head (tvm.tensor.Tensor): Gradient tensor of forward's output with the same shape and dtype as input_x. input_x (tvm.tensor.Tensor): Forward's input tensor support float16 and float32. Returns: A tvm.tensor.Tensor as gradient of forward's input. Supported Platforms: 'Ascend' """ utils.elemwise_shape_check(head.shape, input_x.shape) utils.elemwise_dtype_check(head.dtype, input_x.dtype, utils.DtypeForDavinci.ALL_FLOAT) dtype = input_x.dtype tensor_one = dc.one_const(dtype) def _compute(*i): return tensor_one / (tensor_one + input_x(*i) * input_x(*i)) * head(*i) out_tensor = tvm.compute(input_x.shape, _compute, name="out") return out_tensor
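The element-wise formula can be checked directly in NumPy; a minimal sketch with assumed sample values:

import numpy as np

x = np.array([-1.0, 0.0, 0.5, 2.0], dtype=np.float32)
head = np.ones_like(x)        # upstream gradient dy
dx = head / (1.0 + x * x)     # d/dx atan(x) = 1 / (1 + x^2)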
Python
def dump_cpu_meta(mod, kernel_name): """ Function for dumping cpu meta. Args: mod: the module code of cpu. """ title_dict = dict() # kernel name code = mod.get_source() title_dict["kernelName"] = kernel_name + "_kernel" #thread number thread_num = "null" title_dict["threadNumber"] = thread_num #meta path path_name = get_kernel_meta_path() meta_path = os.path.realpath(path_name) if not os.path.isdir(meta_path): os.makedirs(meta_path, exist_ok=True) # save libraries to kernel meta obj_file = os.path.join(meta_path, kernel_name + '.o') lib_file = os.path.join(meta_path, kernel_name + '.so') mod.save(obj_file, 'k') mod.export_library(lib_file) # sha256 of files obj_sha256 = hashlib.sha256() lib_sha256 = hashlib.sha256() with open(obj_file, 'rb') as f: obj_sha256.update(f.read()) with open(lib_file, 'rb') as f: lib_sha256.update(f.read()) obj_hash_str = obj_sha256.hexdigest() lib_hash_str = lib_sha256.hexdigest() title_dict["objSha256"] = obj_hash_str title_dict["sha256"] = lib_hash_str # save json file to kernel meta json_file = os.path.join(meta_path, kernel_name + ".json") write_code(title_dict, json_file)
Python
def apply_centered_rms_prop(var, mg, ms, mom, grad, lr, momentum, rho, epsilon, target=utils.CCE): """ Update `var` according to the centered RMSProp algorithm. out_mean_grad = decay * mg + (1-decay) * grad out_mean_square = decay * ms + (1-decay) * grad * grad out_mom = momentum * mom + lr * grad / sqrt(out_mean_square - out_mean_grad^2 + epsilon) out_var = var - out_mom Args: var (tvm.tensor.Tensor): Input data of type float16 or float32. mg (tvm.tensor.Tensor): A tensor of the same type and shape as `var`. ms (tvm.tensor.Tensor): A tensor of the same type and shape as `var`. mom (tvm.tensor.Tensor): A tensor of the same type and shape as `var`. grad (tvm.tensor.Tensor): A tensor of the same type and shape as `var`. lr (tvm.tensor.Tensor): A scalar tensor of the same type as `var`. momentum (tvm.tensor.Tensor): A scalar tensor of the same type as `var`. rho (tvm.tensor.Tensor): A scalar tensor of the same type as `var`. epsilon (float): A scalar tensor of the same type as `var`. Returns: tvm.tensor.Tensor, updated var. tvm.tensor.Tensor, updated mean_grad. tvm.tensor.Tensor, updated mean_square. tvm.tensor.Tensor, updated mom. """ utils.ops_dtype_check(var.dtype, utils.DtypeForDavinci.ALL_FLOAT) for i in (mg, ms, mom, lr, rho, momentum, grad): utils.elemwise_dtype_check(var.dtype, i.dtype) for i in (mg, ms, mom, grad): utils.elemwise_shape_check(var.shape, i.shape) for i in (lr, rho, momentum): if tuple(get_shape(i)) != (1,): raise RuntimeError("lr, rho and momentum only support scalar tensor.") if epsilon <= 0: raise ValueError("epsilon should be greater than 0.") out_var, out_mg, out_ms, out_mom = _apply_centered_rms_prop_compute( var, mg, ms, mom, grad, lr, momentum, rho, epsilon) out_var, binds_info = TensorUtils.inplace_set(var, out_var, "var_buf") out_mg, binds_info2 = TensorUtils.inplace_set(mg, out_mg, "mg_buf") out_ms, binds_info3 = TensorUtils.inplace_set(ms, out_ms, "ms_buf") out_mom, binds_info4 = TensorUtils.inplace_set(mom, out_mom, "mom_buf") binds_info.update(binds_info2) binds_info.update(binds_info3) binds_info.update(binds_info4) attrs = {utils.BINDS: binds_info} return out_var, out_mg, out_ms, out_mom, attrs
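A NumPy transcription of the update equations from the docstring (decay corresponds to rho); this is a sketch of the stated formulas, not a validated reference for the kernel itself:

import numpy as np

def centered_rms_prop_ref(var, mg, ms, mom, grad, lr, momentum, rho, epsilon):
    mg_new = rho * mg + (1.0 - rho) * grad
    ms_new = rho * ms + (1.0 - rho) * grad * grad
    mom_new = momentum * mom + lr * grad / np.sqrt(ms_new - mg_new ** 2 + epsilon)
    var_new = var - mom_new
    return var_new, mg_new, ms_new, mom_new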
Python
def segment_max(data, segment_ids, num_segments):
    """
    Computes the max value over each segment of an akg.tvm.tensor, as indicated by segment_ids.

    Args:
        data: akg.tvm.Tensor of type "float16", "float32"
        segment_ids: akg.tvm.Tensor of type int32, sorted
        num_segments: int, number of output segments

    Returns:
        akg.tvm.Tensor of the same type as data, with first dimension num_segments and the
        remaining dimensions taken from data; segments that receive no input are filled with zeros.

    """
    d_dtype = data.dtype
    utils.ops_dtype_check(d_dtype, utils.DtypeForDavinci.ALL_FLOAT)
    d_shape = [x.value for x in data.shape]
    utils.check_shape(d_shape)
    s_shape = segment_ids.shape
    utils.check_shape(s_shape)

    new_segment_ids, idx = gen_ids(segment_ids)

    output_shape = (1, ) + tuple(d_shape[len(s_shape):])
    zero_data = akg.tvm.compute(output_shape, lambda *i: akg.tvm.const(0.0, d_dtype), name="zero")

    data_list = Split(data, new_segment_ids)
    out_n = num_segments

    out = []
    j = 0
    for i in range(0, out_n):
        if i in idx:
            tmp = ReduceMax(data_list[j], 0, True, target=utils.CCE)
            out.append(tmp)
            j = j + 1
        else:
            out.append(zero_data)

    res = Concat(out, 0)
    return res
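A rough NumPy sketch of the intended semantics, assuming sorted segment_ids and zero-filled rows for segments that receive no input:

import numpy as np

def segment_max_ref(data, segment_ids, num_segments):
    out = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
    for seg in range(num_segments):
        rows = data[segment_ids == seg]
        if rows.size:
            out[seg] = rows.max(axis=0)
    return out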
Python
def mean_dynamic_tiling_strategy(tensor, axis): """custom tiling for mean with dynamic shape""" strategy = list() inner_most_to_full = True resnet_inner_most_axis_pos = 4 reduce_axis_to_1 = True reduce_axis_to_no_iso = False multicore_axis_to_1 = True resnet_outer_most_axis_pos = 0 if inner_most_to_full: strategy += ct_util.create_constraint_on_tensor(tensor=tensor, values="FULL", constraints=ct_util.TileConstraint.MAX, tensor_pos=resnet_inner_most_axis_pos) if reduce_axis_to_1: strategy += ct_util.create_constraint_on_tensor(tensor=tensor, values=[1 for _ in axis], constraints=ct_util.TileConstraint.FACTOR, tensor_pos=axis) elif reduce_axis_to_no_iso: strategy += ct_util.create_constraint_on_tensor(tensor=tensor, values=[1 for _ in axis], constraints=ct_util.TileConstraint.FORBID_ISOLATE, tensor_pos=axis) if multicore_axis_to_1: strategy += ct_util.create_constraint_on_tensor(tensor=tensor, values=1, constraints=ct_util.TileConstraint.FACTOR, tensor_pos=resnet_outer_most_axis_pos) return strategy
Python
def Mean(data, axis=None, keepdims=False, target=utils.CCE):
    """
    Computes the mean of the values of a Tensor along the given axis.

    Note:
        If the elements of the axis tuple are unsorted, this function calls preprocess_axis
        first to sort them.
        If the tuple is empty, the mean of all elements is computed.
        If the data type is float16 and the reduced size is not less than 65536, this function
        divides by 65535 first to avoid the reduced dimension becoming too large.

    Args:
        data (tvm.tensor.Tensor): Tensor of type float16, float32.
        axis (Union[list, tuple, int, None]): If the tuple is empty, it is treated as None.
        keepdims (bool): If True, the result keeps the same number of dimensions as the input.

    Returns:
        tvm.tensor.Tensor, has the same type as data. If keepdims is True, all reduced
        dimensions are retained with length 1; otherwise the reduced axes are eliminated.

    Supported Platforms:
        'Ascend'
    """
    # Check types
    utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.ALL_FLOAT)

    # Check shape
    shape = ft_util.get_shape(data)
    utils.reduce_axis_check(shape, axis)
    axis = ft_util.refine_reduce_axis(data, axis)

    count = 1
    for i in axis:
        count *= shape[i]
    output = Sum(data, axis, keepdims, target=target)

    if shape_is_dynamic(data):
        res = akg.tvm.compute(output.shape, lambda *i: akg.lang.ascend.divide_var(output(*i), count), name="res")
    else:
        res = akg.topi.divide(output, count)

    attrs = get_attrs(data)
    if shape_is_dynamic(data):
        attrs["custom_tiling"] = mean_dynamic_tiling_strategy(data, axis)

    return res, attrs
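The reduction is simply the sum divided by the number of reduced elements; a NumPy sanity check with assumed shapes:

import numpy as np

data = np.arange(12, dtype=np.float32).reshape(3, 4)
count = data.shape[1]
mean = data.sum(axis=1) / count
assert np.allclose(mean, data.mean(axis=1))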
Python
def process_results(directory): """ Input: directory: the directory of 'output_gpu', which contains csv files for all networks fused operators. Return: the result data of all operators in directory of 'output_gpu'. """ csv_files = get_output_files(directory) results = pd.DataFrame() logging.info("total numbers of csv files: %s", len(csv_files)) for item in csv_files: op_name = os.path.basename(item).split('.')[0] network = os.path.dirname(item).split('/')[-2] test_level = os.path.dirname(item).split('/')[-1] with open(item, 'rb') as f: lines = f.readlines() if len(lines) < 5: logging.error("Have error in file: %s", item) info = 'Failed' s = pd.Series({'Network': network, 'Level': test_level, 'Name': op_name, 'Calls': info, 'Avg(us)': info, 'Max(us)': info, 'Min(us)': info}) results = results.append(s, ignore_index=True) continue results = results.append(find_op_profiling(item, network, test_level, op_name), ignore_index=True) time_suffix = time.strftime("%Y%m%d", time.localtime()) for col in list(results.columns): if col in ['Calls', 'Avg(us)', 'Max(us)', 'Min(us)']: results.rename(columns={col: col + '_' + time_suffix}, inplace=True) return results
Python
def Select(condition, x1, x2, target=utils.CCE):
    """
    Selects elements from x1 or x2, depending on condition.

    Note:
        Every parameter's shape must be legal; condition's shape supports broadcasting.

    Args:
        condition (tvm.tensor.Tensor): Tensor of type int8, int32, must be 0 or 1.
        x1 (tvm.tensor.Tensor): Tensor of type float16, float32, int8, int32, uint8.
        x2 (tvm.tensor.Tensor): Tensor of type float16, float32, int8, int32, uint8.

    Returns:
        tvm.tensor.Tensor, has the same type and shape as x1.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    shape_x1 = get_shape(x1)
    shape_x2 = get_shape(x2)
    con_shape = get_shape(condition)
    utils.elemwise_shape_check(shape_x1, shape_x2)
    utils.elemwise_dtype_check(x1.dtype, x2.dtype, [utils.DtypeForDavinci.ALL_FLOAT,
                                                    utils.DtypeForDavinci.INT8, utils.DtypeForDavinci.INT32,
                                                    utils.DtypeForDavinci.UINT8])
    utils.ops_dtype_check(condition.dtype, [utils.DtypeForDavinci.INT8, utils.DtypeForDavinci.INT32])
    utils.auto_broadcast_check(con_shape, shape_x1)
    res = select_compute(condition, x1, x2, target)
    return res
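Element-wise selection follows np.where; a small sketch with an assumed int8 condition mask:

import numpy as np

condition = np.array([1, 0, 1, 0], dtype=np.int8)
x1 = np.array([10., 20., 30., 40.], dtype=np.float32)
x2 = np.zeros(4, dtype=np.float32)
np.where(condition.astype(bool), x1, x2)  # -> [10., 0., 30., 0.]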
Python
def BiasAddAd(head, input_shape, data_format, target=utils.CCE): """ Compute gradient for bias_add operator using automatic differentiate. Args: head (tvm.tensor.Tensor): Input tensor. input_shape (Union[list, tuple]): Input shape of head. data_format (str): Data format of input tensors. Returns: tvm.tensor.Tensor of same shape and type as head. Supported Platforms: 'Ascend' """ check_list = ["NHWC", "NC1HWC0", "DefaultFormat"] if data_format not in check_list: raise RuntimeError("bias_add_grad only support %s while dataformat is %s" % (",".join(check_list), data_format)) utils.check_shape(head.shape) shape1 = [x.value for x in head.shape] utils.davinci_format_check(shape1, data_format) a = akg.tvm.placeholder(head.shape, head.dtype, "A") if data_format == "NC1HWC0": bias_shape = (1, head.shape[1], 1, 1, head.shape[4]) b = akg.tvm.placeholder(bias_shape, head.dtype, "B") elif data_format == "NHWC": bias_shape = (input_shape[-1],) b = akg.tvm.placeholder(bias_shape, head.dtype, "B") else: bias_shape = (input_shape[1],) b = akg.tvm.placeholder(bias_shape, head.dtype, "B") c = BiasAdd(a, b, data_format) jacs = list(akg.differentiate(c, [b], head)) attrs = {} return jacs[0], attrs
Python
def avgpool_ad(head, data, kernel, stride, pad): """Compute gradient of avgpool operator using automatic differentiate.""" attrs = {"enable_post_poly_loop_partition": False, "enable_pre_poly_loop_partition": False} avgpool_fwd, _ = Avgpool(data, kernel, stride, pad) [dl_ddata] = akg.differentiate(avgpool_fwd, [data], head) return dl_ddata, attrs
Python
def avgpool_ad_no_custom_diff_manual_schedule(head, data, kernel, stride, pad): """automatic differentiate of avgpool with manual schedule.""" attrs = {"enable_post_poly_loop_partition": False, "enable_pre_poly_loop_partition": False} avgpool_fwd, _ = Avgpool(data, kernel, stride, pad) [dl_ddata] = akg.differentiate(avgpool_fwd, [data], head) # schedule for differetiation operation s = akg.tvm.create_schedule([dl_ddata.op]) kh, kw = kernel shape = get_shape(data) ib, ic1, ih, iw, ic0 = shape if kh == ih and kw == iw: pad2d_input_2_grad = dl_ddata res_value_res_grad = pad2d_input_2_grad.op.input_tensors[0] head = res_value_res_grad.op.input_tensors[0] def comp_func(s): head_ub = s.cache_read(head, "local.UB", [res_value_res_grad]) result_ub = s.cache_write(pad2d_input_2_grad, "local.UB") s[res_value_res_grad].set_scope("local.UB") b, c1, h, w, c0 = pad2d_input_2_grad.op.axis s[head_ub].compute_at(s[pad2d_input_2_grad], b) s[res_value_res_grad].compute_at(s[pad2d_input_2_grad], b) s[result_ub].compute_at(s[pad2d_input_2_grad], b) else: pad2d_input_2_grad = dl_ddata Broadcast_jac = pad2d_input_2_grad.op.input_tensors[0] res_value_res_grad = Broadcast_jac.op.input_tensors[0] head = res_value_res_grad.op.input_tensors[0] def comp_func(s): head_ub = s.cache_read(head, "local.UB", [res_value_res_grad]) result_ub = s.cache_write(pad2d_input_2_grad, "local.UB") s[Broadcast_jac].set_scope("local.UB") s[res_value_res_grad].set_scope("local.UB") b, c1, h, w, c0 = result_ub.op.axis s[result_ub].reorder(*result_ub.op.reduce_axis, b, c1, h, w, c0) s[Broadcast_jac].compute_at(s[result_ub], b) return dl_ddata, comp_func, attrs
Python
def GreaterEqual(data1, data2, target=utils.CCE):
    """
    Check whether data1 is greater than or equal to data2, element-wise.

    Args:
        data1 (tvm.tensor.Tensor): Tensor.
        data2 (tvm.tensor.Tensor): Tensor.

    Returns:
        tvm.tensor.Tensor. True where data1 is greater than or equal to data2, False elsewhere.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    # check shapes
    shape1 = [x.value for x in data1.shape]
    shape2 = [x.value for x in data2.shape]
    shapes = [shape1, shape2]
    for i in range(len(shapes)):
        utils.check_shape(shapes[i])

    # check types
    dtype = data1.dtype
    dtype2 = data2.dtype
    utils.elemwise_dtype_check(dtype, dtype2)
    if target == utils.CCE:
        utils.ops_dtype_check(dtype, utils.DtypeForDavinci.FLOAT16)

    res = akg.topi.greater_equal(data1, data2)
    return res
Python
def MatMul(x, y, b, out_dtype, left_format="zZ", right_format="nZ", out_format="zN", transpose_x=False, transpose_y=False, attrs=None, target=utils.CCE): """ Computes matrix multiplication x * y + b. Args: x: akg.tvm.Tensor of type int8, uint8, float16, float32, int32. Left matrix. y: akg.tvm.Tensor of same type as x. Right matrix. b: akg.tvm.Tensor of same type as x. Bias tensor. out_dtype: str. Data type of output tensor. left_format: str. Data format of left matrix. Supported data format list ["zZ", "nZ", "zN"]. right_format: str. Data format of right matrix. Supported data format list ["zZ", "nZ", "zN"]. out_format: str. Data format of output tensor. Supported data format list ["zZ", "nZ", "zN"]. transpose_x: Boolean. Specifies whether x is transposed or not. transpose_y: Boolean. Specifies whether y is transposed or not. attrs: Dict. Used in matmul computation. Note: before call matmul, 2d to Fractal is needed. Returns: akg.tvm.Tensor with type out_dtype. Supported Platforms: 'Ascend' """ utils.ops_dtype_check([x.dtype, y.dtype], utils.DtypeForDavinci.ALL_FLOAT) shape_x = [shape_element.value for shape_element in x.shape] utils.check_shape(shape_x) shape_y = [shape_element.value for shape_element in y.shape] utils.check_shape(shape_y) if left_format not in ["zZ", "zN"]: raise ValueError("unsupport left_format now: %s" % left_format) if right_format not in ["nZ", "zZ", "zN"]: raise ValueError("unsupport right_format now: %s" % right_format) if out_format not in ["zN", "zZ"]: raise ValueError("unsupport out_format now: %s" % out_format) out = matmul4D_compute(x, y, b, out_dtype, left_format, right_format, out_format, transpose_x, transpose_y, attrs) attr_map = {"pragma_rmselfdep": False} dims_info, _ = matmul_set_dim(x, y, b, out_dtype, left_format, right_format, out_format, transpose_x, transpose_y) attr_map["dim"] = dims_info return out, attr_map
Python
def focalloss_ad(head, logits, labels, gamma): """Compute gradient of focalloss operator using automatic differentiate.""" b, _ = focal_loss.focal_loss(logits, labels, gamma) jacs = akg.differentiate(b, [logits], head) return jacs[0]
Python
def gen_data(dtype1, dtype2, shape1, shape2):
    """generate valid test data for cross"""
    input1 = random_gaussian(shape1).astype(dtype1)
    input2 = random_gaussian(shape2).astype(dtype2)

    if dtype1 in ("int8", "uint8", "int32"):
        # For the overflow case, numpy truncates the result, while davinci
        # clamps it to the maximum or minimum value of the dtype.
        expect = np.cross(input1.astype("float32"), input2.astype("float32"),
                          axisa=0, axisb=0, axisc=0)
        expect = np.maximum(expect, np.ones_like(expect) * np.iinfo(dtype1).min)
        expect = np.minimum(expect, np.ones_like(expect) * np.iinfo(dtype1).max)
        expect = expect.astype(dtype1)
    else:
        expect = np.cross(input1, input2, axisa=0, axisb=0, axisc=0)
    out_buf = np.full(expect.shape, np.nan, dtype1)
    return expect, (input1, input2), out_buf
Python
def Relu(inputs, target=utils.CCE): """ Compute rectified linear of input tensor. Return max(inputs, 0) element-wise. Args: inputs (tvm.tensor.Tensor): Input tensor. Returns: tvm.tensor.Tensor with the same type and shape as data. Supported Platforms: 'Ascend' """ utils.check_shape(inputs.shape) utils.ops_dtype_check(inputs.dtype, utils.DtypeForDavinci.ALL_FLOAT) output = akg.tvm.compute(inputs.shape, lambda *i: akg.tvm.max(inputs(*i), akg.tvm.const(0, inputs.dtype)), name="output") return output
Python
def Split(data, num_or_size_splits, split_axis=0, num=None, target="cce"): """ Splits a tensor into sub tensors. Args: data: Tensor. num_or_size_splits: Integer or list. Used to split data. split_axis: Integer. The dimension along which to split. Returns: Tuple of tensor, counts of which is determined by num_or_size_splits. """ dtype = data.dtype shape = [x.value for x in data.shape] if isinstance(num_or_size_splits, (list, tuple)): if len(num_or_size_splits) >= 128: raise ValueError("Output tensors of split should not be more than 127. CCE can not support now.") if sum(num_or_size_splits) != shape[split_axis]: raise ValueError("Sum of size_split must be equal to the value of split axis.") if len(num_or_size_splits) == 1: res = akg.tvm.compute(data.shape, lambda *indice: data(*indice).astype(dtype), name='res') return res size_splits = [num_or_size_splits[0]] for i in range(len(num_or_size_splits) - 2): size_splits.append(num_or_size_splits[i + 1] + size_splits[i]) res_tmp = akg.topi.split(data, size_splits, split_axis) else: if num_or_size_splits >= 128: raise ValueError("Output tensors of split should not be more than 127. CCE can not support now.") res_tmp = akg.topi.split(data, num_or_size_splits, split_axis) # add zero for each output to avoid same op.name zero = akg.tvm.const(0, dtype=data.dtype) res = [] for item in res_tmp: item = akg.lang.ascend.vadds(item, zero) if item.dtype != dtype: item = akg.topi.cast(item, dtype) res.append(item) return res
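The size list is converted into cumulative split points before calling topi.split; the same conversion is easy to check against np.split, with assumed sizes:

import numpy as np

data = np.arange(9)
sizes = [2, 3, 4]
points = np.cumsum(sizes)[:-1]   # [2, 5], matching the size_splits loop above
parts = np.split(data, points)   # pieces of length 2, 3 and 4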
Python
def composite_peel_analyze(desc, attrs):
    """
    Analyzes the peeling space for a given json str.

    Args:
       desc: json str
       attrs: dict of attr

    Returns:
       CompositePeel.
    """
    peel = CompositePeel(desc, attrs)
    peel.analyze()
    return peel
Python
def check_fold_dim(descs):
    """
    Check whether dimension folding can be applied to every json str in descs;
    returns True only if all of them support it.

    Args:
       descs: list of json str

    Returns:
       Bool.
    """
    func = akg.tvm.get_global_func("check_fold_dim")
    fold_dim = func(descs)
    fold_dim = bool(fold_dim)
    return fold_dim
Python
def fused_minimum_or_maximum_grad(dz, x, y, grad_x, grad_y, op_type): """ Gradient for minimum or maximum operation between two input tensors `x` and `y`. Args: dz (tvm.tensor.Tensor): Type float16, float32, int32. x (tvm.tensor.Tensor): Type float16, float32, int32. y (tvm.tensor.Tensor): Type float16, float32, int32. grad_x (bool): Whether calculate dx. grad_y (bool): Whether calculate dy. op_type (str): The type of the op, "GE" for MaximumGrad or "LE" for MinimumGrad. Note: At least one of grad_x and grad_y is True. Returns: dx, tvm.tensor.Tensor of the same type as inputs, it will be returned if grad_x is True. dy, tvm.tensor.Tensor of the same type as inputs, it will be returned if grad_y is True. """ utils.check_shape(x) utils.check_shape(y) utils.check_shape(dz) utils.ops_dtype_check([x.dtype, y.dtype, dz.dtype], [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32]) utils.broadcast_check(x, dz) utils.broadcast_check(y, dz) # check op types check_list = ["GE", "LE"] if op_type not in check_list: raise ValueError("FusedMinimumOrMaximumGrad only support %s while op type is %s" % (",".join(check_list), op_type)) if not grad_x and not grad_y: raise ValueError("At least one of grad_x and grad_y is True.") x_shape = get_shape(x) y_shape = get_shape(y) dz_shape = get_shape(dz) ori_dtype = dz.dtype # get greater compute x = akg.lang.ascend.broadcast(x, dz_shape) y = akg.lang.ascend.broadcast(y, dz_shape) if product_is_mini() and ori_dtype != "float16": x = Cast(x, "float16", "cce") y = Cast(y, "float16", "cce") dz = Cast(dz, "float16", "cce") elif ori_dtype == "int32": x = Cast(x, "float32", "cce") y = Cast(y, "float32", "cce") dz = Cast(dz, "float32", "cce") zero = zero_const(dz.dtype) if op_type == "LE": dx = tvm.compute(dz_shape, lambda *i: tvm.expr.Select((x(*i) <= y(*i)), dz(*i), zero), name='dx') dy = topi.subtract(dz, dx) elif op_type == "GE": dx = tvm.compute(dz_shape, lambda *i: tvm.expr.Select((x(*i) >= y(*i)), dz(*i), zero), name='dx') dy = topi.subtract(dz, dx) if dx.dtype == "float16": # cast to fp32 for higher precision of reduce_sum. if get_shape(dx) != x_shape: dx = Cast(dx, "float32", "cce") if get_shape(dy) != y_shape: dy = Cast(dy, "float32", "cce") dx = SumByShape(dx, x_shape) dy = SumByShape(dy, y_shape) if ori_dtype != dx.dtype: dx = Cast(dx, ori_dtype, "cce") if ori_dtype != dy.dtype: dy = Cast(dy, ori_dtype, "cce") attrs = get_default_attrs() if grad_x and grad_y: return dx, dy, attrs if grad_x: return dx, attrs return dy, attrs
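For the simple case where x, y and dz already share a shape, the "LE" (MinimumGrad) branch reduces to the NumPy sketch below; the broadcast handling and the SumByShape reduction back to the original input shapes are omitted:

import numpy as np

def minimum_grad_ref(dz, x, y):
    dx = np.where(x <= y, dz, 0.0)   # dz flows to x where x <= y
    dy = dz - dx                     # the remainder flows to y
    return dx, dy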
Python
def diagpart(data, target=utils.CCE):
    """
    Returns the diagonal part of data.

    Args:
        data: Tensor of even rank 2, 4, 6 or 8 whose shape satisfies
            shape[i] == shape[rank // 2 + i].

    Returns:
        Tensor, has the same type as data and shape data.shape[:rank // 2].
    """
    shape = [x.value for x in data.shape]
    utils.check_shape(shape)

    rank = len(shape)
    if rank not in (2, 4, 6, 8):
        raise ValueError("diagpart only supports even rank (2/4/6/8) while the rank is {}".format(rank))

    o_shape = []
    for i in range(rank // 2):
        if shape[i] == shape[rank // 2 + i]:
            o_shape.append(shape[i])
        else:
            raise ValueError("diagpart only supports shapes whose two halves match "
                             "(shape[i] == shape[rank // 2 + i]) while the shape is {}".format(shape))

    dtype = data.dtype
    utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])

    if rank == 2:
        res = akg.tvm.compute(o_shape, lambda i: data[i, i])
    elif rank == 4:
        res = akg.tvm.compute(o_shape, lambda i, j: data[i, j, i, j])
    elif rank == 6:
        res = akg.tvm.compute(o_shape, lambda i, j, m: data[i, j, m, i, j, m])
    elif rank == 8:
        res = akg.tvm.compute(o_shape, lambda i, j, m, n: data[i, j, m, n, i, j, m, n])

    return res
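A short sketch of the rank-4 case, again assuming `akg.tvm.placeholder`:

import akg

# Rank-4 input whose first half of the shape equals its second half.
data = akg.tvm.placeholder((3, 5, 3, 5), name="data", dtype="float16")
out = diagpart(data)   # shape (3, 5); out[i, j] == data[i, j, i, j]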
Python
def SqueezeGrad(y_grad, x_shape, target=utils.CUDA):
    """
    Computes gradients for squeeze op.

    Args:
        y_grad (tvm.tensor.Tensor): the gradient to be propagated.
        x_shape (Union[list, tuple]): the shape of squeeze's original input, which is
            also the shape of the returned gradient.

    Returns:
        tvm.tensor.Tensor: output gradient.
    """
    return topi.reshape(y_grad, x_shape)
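A minimal sketch, assuming `akg.tvm.placeholder`: the gradient of squeeze is just a reshape back to the pre-squeeze shape.

import akg

dy = akg.tvm.placeholder((8, 16), name="dy", dtype="float32")
dx = SqueezeGrad(dy, (8, 1, 16, 1))   # dx has shape (8, 1, 16, 1)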
Python
def check_input_type_list_tuple(inputs, expect):
    """Check that `inputs` is a list/tuple whose elements have the expected type.

    `expect` is a pair of (parameter_name, (container_types, element_type)).
    """
    if not isinstance(inputs, expect[1][0]):
        raise RuntimeError("the input parameter %s must be (list, tuple), while"
                           " type of input is %s" % (expect[0], type(inputs)))
    for inp in inputs:
        if not isinstance(inp, expect[1][1]):
            raise RuntimeError("The element in parameter %s must be %s, while "
                               "type of input is %s" % (
                                   expect[0], expect[1][1], type(inp)))
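A quick illustration of the `expect` layout this helper assumes (hypothetical parameter name "axis"):

# expect == (parameter_name, (container_types, element_type))
check_input_type_list_tuple([1, 2, 3], ("axis", ((list, tuple), int)))   # passes
check_input_type_list_tuple((1, "2"), ("axis", ((list, tuple), int)))    # raises RuntimeError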
Python
def shape_dtype_max_size_check(shape, dtype):
    """Check that the total data size implied by shape and dtype does not exceed MAX_DATA_SIZE."""
    if shape:
        for x in shape:
            if not isinstance(x, int):
                # symbolic dimensions cannot be checked statically
                return
        mul = get_bytes(dtype) * int(reduce(lambda x, y: int(x) * int(y), shape))
        if mul > MAX_DATA_SIZE:
            error_msg = "*".join([str(sh) for sh in shape])
            raise RuntimeError("Invalid shape, data is {} bytes ({}), which "
                               "exceeds max data size {} bytes"
                               .format(mul, error_msg, MAX_DATA_SIZE))
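A small sketch of how the check behaves, assuming get_bytes("float32") == 4:

shape_dtype_max_size_check([1024, 1024], "float32")   # 4 MiB of data: passes for typical MAX_DATA_SIZE
# Shapes containing non-int (e.g. symbolic) dimensions are skipped rather than rejected.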
Python
def tensor_max_size_check(tensor):
    """Check that a tensor's total data size does not exceed MAX_DATA_SIZE."""
    if not isinstance(tensor, akg.tvm.tensor.Tensor):
        raise RuntimeError("tensor should be an akg.tvm.tensor.Tensor, but got "
                           "type {}".format(type(tensor)))
    shape = get_shape(tensor)
    dtype = tensor.dtype
    shape_dtype_max_size_check(shape, dtype)
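A minimal sketch, assuming `akg.tvm.placeholder`:

import akg

t = akg.tvm.placeholder((128, 128), name="t", dtype="float16")
tensor_max_size_check(t)        # passes while 128 * 128 * 2 bytes <= MAX_DATA_SIZE
tensor_max_size_check([1, 2])   # raises RuntimeError: not an akg.tvm.tensor.Tensor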
Python
def ops_dtype_check(dtype, args):
    """Check that the given dtype(s) are among the expected dtypes described by args."""
    expected_dtype = list()

    def _get_expect_dtype(expected_dtype, arg):
        if isinstance(arg, str):
            expected_dtype.append(arg)
        elif isinstance(arg, DtypeForDavinci):
            expected_dtype += arg.value
        elif isinstance(arg, (list, tuple)):
            for t in arg:
                _get_expect_dtype(expected_dtype, t)
        else:
            raise TypeError("arg should be either a string, a DtypeForDavinci "
                            "or a list/tuple of string or DtypeForDavinci, "
                            "while current is {}".format(type(arg)))

    _get_expect_dtype(expected_dtype, args)

    if isinstance(dtype, (list, tuple)):
        checking_dtype = [d.lower() for d in dtype]
    elif isinstance(dtype, str):
        checking_dtype = [dtype.lower()]
    else:
        raise TypeError("dtype should be either a string or a tuple/list of string")
    error_msg = "Supported dtype: {}, while received dtype: {}"
    if not set(checking_dtype).issubset(set(expected_dtype)):
        raise RuntimeError(error_msg.format(expected_dtype, checking_dtype))
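Typical calls, assuming DtypeForDavinci.ALL_FLOAT.value lists the float dtype strings (as _get_expect_dtype requires):

ops_dtype_check("float16", DtypeForDavinci.ALL_FLOAT)                 # passes
ops_dtype_check(["float16", "float32"], DtypeForDavinci.ALL_FLOAT)    # passes
ops_dtype_check("int8", [DtypeForDavinci.ALL_FLOAT, "int32"])         # raises RuntimeError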
Python
def reduce_axis_check(reduce_shape, reduce_axis):
    """Check validity of the reduce axis for a given reduce shape."""
    dim = len(reduce_shape)
    if dim == 1 and isinstance(reduce_shape[0], int) and int(reduce_shape[0]) == 1:
        raise RuntimeError("Error, reduce shape is 1. Scalar is not supported "
                           "for reduction, please input a vector.")
    if isinstance(reduce_axis, int):
        if reduce_axis not in range(-dim, dim):
            raise RuntimeError("Reduce axis should be in range [%d, %d)"
                               "" % (-dim, dim))
    elif isinstance(reduce_axis, (tuple, list)):
        if len(reduce_axis) > len(reduce_shape):
            raise RuntimeError("Reduce axis list exceeds reduce shape length: "
                               "%d vs %d, error" % (len(reduce_axis), len(reduce_shape)))
        processed_axis = []
        for axis in reduce_axis:
            processed_axis.append(int(axis + dim) if axis < 0 else int(axis))
        if len(set(processed_axis)) < len(processed_axis):
            raise RuntimeError("Reduce axis list contains %d duplicated element(s), please check"
                               % (len(processed_axis) - len(set(processed_axis))))
        for axis in processed_axis:
            if axis >= dim:
                raise RuntimeError("Invalid reduce axis, axis should be less than %d" % dim)
    elif reduce_axis is not None:
        raise RuntimeError("axis should be a list, tuple or int.")
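A few illustrative calls:

reduce_axis_check([32, 16, 8], -1)       # ok: -1 is normalized to axis 2
reduce_axis_check([32, 16, 8], (0, 2))   # ok
reduce_axis_check([32, 16, 8], (1, 1))   # raises: duplicated axis
reduce_axis_check([1], 0)                # raises: scalar reduction is not supported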
Python
def elemwise_shape_check(shape_a, shape_b):
    """Check that two shapes are valid for an element-wise op (same rank and same dims)."""
    check_shape(shape_a)
    check_shape(shape_b)
    if len(shape_a) != len(shape_b):
        raise RuntimeError("Element-wise operation needs the same shape length, "
                           "while current is %s vs %s" % (len(shape_a), len(shape_b)))
    for i, shp in enumerate(shape_a):
        if int(shp) != int(shape_b[i]):
            raise RuntimeError("Element-wise operation needs the same data shape, "
                               "while current is %s vs %s" % (shp, shape_b[i]))
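For example:

elemwise_shape_check([16, 32], [16, 32])   # passes
elemwise_shape_check([16, 32], [16, 1])    # raises RuntimeError: dim 1 differs (32 vs 1)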
Python
def elemwise_dtype_check(dtype_a, dtype_b, supported_type=None):
    """Check that two dtypes match (and, optionally, are among the supported types) for an element-wise op."""
    if supported_type:
        ops_dtype_check(dtype_a, supported_type)
        ops_dtype_check(dtype_b, supported_type)
    if dtype_a.lower() != dtype_b.lower():
        raise RuntimeError("Element-wise operation needs the same data type, while "
                           "current is %s vs %s" % (dtype_a.lower(), dtype_b.lower()))
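For example:

elemwise_dtype_check("float16", "float16", DtypeForDavinci.ALL_FLOAT)   # passes
elemwise_dtype_check("float16", "float32")                              # raises RuntimeError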
Python
def broadcast_check(ori_shape, dst_shape): """check valid broadcast from ori_shape to dst_shape.""" shape_l = get_shape(ori_shape) shape_r = get_shape(dst_shape) if len(shape_l) <= len(shape_r): dim_diff = len(shape_r) - len(shape_l) shape_l = ([1] * dim_diff) + shape_l else: raise RuntimeError("Cannot broadcast from shape %s to %s" % (str(ori_shape), str(dst_shape))) for i, shp in enumerate(shape_l): if int(shp) != int(shape_r[i]) and int(shp) != 1: raise RuntimeError("Cannot broadcast from shape %s to %s" % (str(ori_shape), str(dst_shape)))
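A sketch, assuming get_shape also accepts plain shape lists:

broadcast_check([1, 32], [16, 8, 32])   # ok: [1, 32] aligns to [1, 1, 32]
broadcast_check([4, 32], [8, 32])       # raises RuntimeError: 4 can neither equal 8 nor be 1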
Python
def gemm_format_check(lhs_input, rhs_input, lhs_trans=False, rhs_trans=False):
    """Check gemm shapes (shape length, batch dims and reduction dim K)."""
    dim = len(lhs_input)
    if len(rhs_input) != dim:
        raise RuntimeError("Dimensions are different, lhs input is of %d dimension "
                           "while rhs input is of %d dimension" % (dim, len(rhs_input)))
    b_pos = [0] if dim == 3 else [0, 1]
    lhs_k_pos = -2 if lhs_trans else -1
    rhs_k_pos = -1 if rhs_trans else -2

    def length_check(tensor):
        if len(tensor) < 2 or len(tensor) > 4:
            raise RuntimeError("Gemm only supports 2d shape (height, width) "
                               "or 3d shape (batch, height, width) "
                               "or 4d shape (batch_o, batch_i, height, width) "
                               "while shape length is %d!" % (len(tensor)))

    def value_check(loc):
        if loc == "B":
            if len(lhs_input) > 2:
                for pos in b_pos:
                    value = int(lhs_input[pos])
                    cmp_value = int(rhs_input[pos])
                    if value != cmp_value:
                        raise RuntimeError("%s size is not compatible, lhs "
                                           "input: %d and rhs input: %d" %
                                           (loc, value, cmp_value))
        if loc == "K":
            if isinstance(lhs_input[lhs_k_pos], akg.tvm.expr.Var) or isinstance(rhs_input[rhs_k_pos], akg.tvm.expr.Var):
                return
            value = int(lhs_input[lhs_k_pos])
            cmp_value = int(rhs_input[rhs_k_pos])
            if cmp_value != value:
                raise RuntimeError("%s size is not compatible, lhs input: %d, "
                                   "rhs input: %d" % (loc, value, cmp_value))

    for data in [lhs_input, rhs_input]:
        length_check(data)
    for location in ["B", "K"]:
        value_check(location)
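A sketch of (batch, M, K) x (batch, K, N) checks:

gemm_format_check([2, 64, 32], [2, 32, 128])                   # passes: batch and K agree
gemm_format_check([2, 32, 64], [2, 32, 128], lhs_trans=True)   # passes: K read from dim -2 of lhs
gemm_format_check([2, 64, 32], [2, 16, 128])                   # raises: K mismatch (32 vs 16)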
Python
def davinci_format_check(shape, tensor_format, dim=-1):
    """Check validity of a tensor's shape for a given format used on the davinci chip."""
    all_format_shape = {"NCHW": 4,
                        "NHWC": 4,
                        "NC1HWC0": 5,
                        "DefaultFormat": [2, 4]}
    if dim not in [-1, 2, 4, 5]:
        raise RuntimeError("Only 2d, 4d and 5d format checks are supported; please set "
                           "dim to the dimensionality you want to check, "
                           "or use the default value -1 to check all of them")
    if dim == -1:
        support_format_shape = all_format_shape
    else:
        support_format_shape = {}
        for k, v in all_format_shape.items():
            if isinstance(v, int) and v == dim:
                support_format_shape[k] = v
            if isinstance(v, list) and dim in v:
                support_format_shape[k] = v

    support_shape = {"NC1HWC0": (4, 16)}
    if not isinstance(tensor_format, str):
        raise RuntimeError("Invalid davinci format, should be a string, "
                           "but got %s" % (type(tensor_format)))
    if tensor_format not in support_format_shape.keys():
        raise RuntimeError("Invalid davinci format {}, davinci supports {}"
                           .format(tensor_format, support_format_shape.keys()))
    if isinstance(support_format_shape[tensor_format], int):
        if len(shape) != support_format_shape[tensor_format]:
            raise RuntimeError("Invalid shape {} for davinci format {}, needs "
                               "{} dim shape, current length {}"
                               .format(shape, tensor_format,
                                       support_format_shape[tensor_format],
                                       len(shape)))
    if isinstance(support_format_shape[tensor_format], list):
        if len(shape) not in support_format_shape[tensor_format]:
            raise RuntimeError("Invalid shape {} for davinci format {}, needs {} dim shape"
                               .format(shape, tensor_format,
                                       support_format_shape[tensor_format]))
    if tensor_format in support_shape.keys():
        check_dim = support_shape[tensor_format][0]
        expect_shape = support_shape[tensor_format][1]
        if int(shape[check_dim]) != expect_shape:
            raise RuntimeError("Invalid shape {} for davinci format {}, dim {} "
                               "should be {}, while current is {}"
                               .format(shape, tensor_format, check_dim,
                                       expect_shape, shape[check_dim]))
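For example:

davinci_format_check([32, 1, 28, 28, 16], "NC1HWC0")   # ok: 5 dims, C0 == 16
davinci_format_check([32, 16, 28, 28], "NCHW", dim=4)  # ok
davinci_format_check([32, 1, 28, 28, 8], "NC1HWC0")    # raises: C0 must be 16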