def is_valid_reduce_axis(tensor, reduce_axis):
    """
    Check whether the reduce axis is valid for shape refinement.
    If shape[axis] is 1 for a reduce axis, the shape cannot be refined, otherwise the reduce axis would be wrong.
    Args:
        tensor (tvm.tensor.Tensor): input tensor.
        reduce_axis (Union[list, tuple, int]): axis to reduce.
    Returns:
        True or False.
    """
    # if shape[axis] is 1 for a reduce axis, the shape cannot be refined,
    # otherwise the reduce axis would be wrong
    shape = get_shape(tensor)
    if hasattr(reduce_axis, 'index'):
        for id_ite in reduce_axis:
            if shape[id_ite] == 1:
                return False
    else:
        if shape[reduce_axis] == 1:
            return False
    return True
def axis_check(shape_len, axis):
    """Check the value of axis and return the sorted axis."""
    def _axis_value_type_check(value):
        if not isinstance(value, int):
            raise RuntimeError("type of axis value should be int")
        if value >= shape_len or value < -shape_len:
            raise RuntimeError(
                "input axis is out of range, axis value can be from %d to %d" %
                (-shape_len, shape_len - 1))
        if value < 0:
            value = shape_len + value
        return value

    if not hasattr(axis, 'index'):
        axis = _axis_value_type_check(axis)
        return axis
    for i, axs in enumerate(axis):
        axis[i] = _axis_value_type_check(axs)
    axis = sorted(set(axis))
    return axis
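For reference, a minimal standalone sketch (not the library function itself) that mirrors the same normalization rules on plain Python values; normalize_axes is a hypothetical name:

def normalize_axes(shape_len, axis):
    # mirror of axis_check: accept an int or a list of ints,
    # map negatives to positives and return a sorted, de-duplicated list
    axes = [axis] if isinstance(axis, int) else list(axis)
    out = []
    for a in axes:
        if not isinstance(a, int):
            raise TypeError("axis values must be int")
        if a >= shape_len or a < -shape_len:
            raise ValueError("axis out of range [%d, %d]" % (-shape_len, shape_len - 1))
        out.append(a + shape_len if a < 0 else a)
    return sorted(set(out))

# e.g. normalize_axes(4, [-1, 0, 0]) -> [0, 3]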
def check_typename(arg_name, arg_type, valid_types):
    """Check that arg_type is one of valid_types and return it; otherwise raise ValueError with readable type names."""
    def get_typename(t):
        return t.__name__ if hasattr(t, '__name__') else str(t)

    if arg_type in valid_types:
        return arg_type
    type_names = [get_typename(t) for t in valid_types]
    if len(valid_types) == 1:
        raise ValueError('type of {} should be {}, but got {}'.format(
            arg_name, type_names[0], get_typename(arg_type)))
    raise ValueError('type of {} should be one of {}, but got {}'.format(
        arg_name, type_names, get_typename(arg_type)))
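A hedged usage sketch of the check above; the argument names and types are illustrative only:

# accepted: the type is returned unchanged when it is listed as valid
assert check_typename("dtype", int, (int, float)) is int
# rejected: ValueError carries the readable type names
try:
    check_typename("dtype", str, (int, float))
except ValueError as e:
    print(e)  # type of dtype should be one of ['int', 'float'], but got str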
def judge_var(num):
    """Judge whether num is a tvm var, a tvm const or a python data type."""
    var_dict = {
        "python_const": [int, float],
        "tvm_const": [
            akg.tvm.expr.IntImm, akg.tvm.expr.UIntImm, akg.tvm.expr.FloatImm],
        "tvm_var": [akg.tvm.expr.Var]}
    num_type = type(num)
    for i in var_dict:
        if num_type in var_dict[i]:
            return i
    raise RuntimeError("Input var dtype {} error".format(type(num)))
def check_int_list(array, array_name):
    """Check whether all the elements are integers."""
    for num in array:
        if not isinstance(num, int):
            raise RuntimeError("Type of value in %s should be int, but got type %s" % (array_name, type(num)))
def _sin(x):
    """Implementation of Taylor's series for sine."""
    input_x_power = akg.lang.ascend.vmul(x, x)
    iter_value = akg.lang.ascend.vmul(
        akg.lang.ascend.vmuls(input_x_power, FIRST_FACTOR), x)
    res = akg.lang.ascend.vadd(x, iter_value)
    i = FIRST_ORDER
    while i < LAST_ORDER:
        iter_value = akg.lang.ascend.vmuls(
            akg.lang.ascend.vmul(input_x_power, iter_value),
            -1.0 / (i * (i - 1)))
        res = akg.lang.ascend.vadd(res, iter_value)
        # add 2 to get the next order
        i = i + 2
    return res
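FIRST_FACTOR, FIRST_ORDER and LAST_ORDER are constants defined elsewhere in the module; as a sanity check, a NumPy sketch of the same recurrence, assuming FIRST_FACTOR = -1/3!, FIRST_ORDER = 5 and a hypothetical LAST_ORDER of 13:

import numpy as np

def sin_taylor(x, first_order=5, last_order=13):
    # NumPy mirror of _sin; the constants here are assumptions standing in
    # for FIRST_FACTOR (-1/6, the x**3/3! term), FIRST_ORDER and LAST_ORDER
    x = np.asarray(x, dtype=np.float32)
    x_power = x * x
    iter_value = (-1.0 / 6.0) * x_power * x
    res = x + iter_value
    i = first_order
    while i < last_order:
        iter_value = x_power * iter_value * (-1.0 / (i * (i - 1)))
        res = res + iter_value
        i += 2
    return res

# close to np.sin on a reduced range such as [-pi/2, pi/2]
x = np.linspace(-np.pi / 2, np.pi / 2, 5)
print(np.max(np.abs(sin_taylor(x) - np.sin(x))))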
def Sin(x, target=utils.CCE):
    """
    Computes the sine value of a tensor with Taylor's theorem.
    .. math::
        \\begin{array}{ll} \\\\
            sin(x) = x - \\frac{x^3}{3!} + \\frac{x^5}{5!} + ... +
                (-1)^k \\cdot \\frac{x^{2k+1}}{(2k+1)!}
        \\end{array}
    Args:
        x (tvm.tensor.Tensor): Tensor of type float16, float32.
    Returns:
        tvm.tensor.Tensor of same type and shape as x.
    Supported Platforms:
        'Ascend'
    """
    utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)
    utils.check_shape(x.shape)
    use_call = True
    if use_call:
        return sin_call(x)
    return sin_compute(x)
def four2five_set_dim_func(data, format_, dst_type):
    """Set dim info for attr."""
    shape = get_shape(data)
    if format_ == 'NCHW':
        n, _, h, w = shape
    else:
        n, h, w, _ = shape

    shape[0] = 1
    if h != 1 and w != 1:
        if format_ == 'NCHW' and shape[1] > 16:
            shape[1] = 1
        if format_ == 'NHWC' and shape[-1] > 16:
            shape[-1] = 1
    if n == 1:
        shape.remove(shape[0])

    hash_key = str((tuple(shape), format_, data.dtype, dst_type))
    return ct_util.set_dims_by_key(hash_key, four2five_set_dim_map), hash_key
def four2five_tiling_strategy(tensor, input_format, expansion=None):
    """Custom tiling strategy for four2five op."""
    strategy = ct_util.create_template(tensor=tensor,
                                       template=ct_util.TileTemplate.NC1HWC0)
    if input_format == "NHWC" or expansion:
        priority_map = {4: 0, 1: 1, 3: 2, 2: 3, 0: 4}  # tile in C0->C1->W->H->N sequence
        for pos, priority in priority_map.items():
            strategy.append(ct_util.create_constraint_on_tensor(tensor=tensor,
                                                                values=priority,
                                                                constraints=ct_util.TileConstraint.SET_PRIORITY,
                                                                tensor_pos=pos)[0])
    if expansion:
        strategy.append(ct_util.create_constraint_on_tensor(tensor=tensor,
                                                            values=expansion,
                                                            constraints=ct_util.TileConstraint.SET_EXPANSION)[0])
    return strategy
def four2five_tiling_strategy_dynamic(tensor, input_format):
    """Custom tiling strategy for four2five op."""
    strategy = list()
    if input_format == "NCHW":
        shape = get_shape(tensor)
        if shape[1] == 1:
            strategy.append(ct_util.create_constraint_on_tensor(tensor, 1, ct_util.TileConstraint.FACTOR, 0)[0])
            strategy.append(ct_util.create_constraint_on_tensor(tensor, 1, ct_util.TileConstraint.FACTOR, 1)[0])
            strategy.append(ct_util.create_constraint_on_tensor(tensor, 1, ct_util.TileConstraint.FACTOR, 2)[0])
            strategy.append(ct_util.create_constraint_on_tensor(tensor, 112, ct_util.TileConstraint.FACTOR, 3)[0])
            strategy.append(ct_util.create_constraint_on_tensor(tensor, 16, ct_util.TileConstraint.FACTOR, 4)[0])
        elif shape[1] == 128:
            strategy.append(ct_util.create_constraint_on_tensor(tensor, 1, ct_util.TileConstraint.FACTOR, 0)[0])
            strategy.append(ct_util.create_constraint_on_tensor(tensor, 1, ct_util.TileConstraint.FACTOR, 1)[0])
            strategy.append(ct_util.create_constraint_on_tensor(tensor, 1, ct_util.TileConstraint.FACTOR, 2)[0])
            strategy.append(ct_util.create_constraint_on_tensor(tensor, "FULL", ct_util.TileConstraint.MAX, 3)[0])
            strategy.append(ct_util.create_constraint_on_tensor(tensor, 16, ct_util.TileConstraint.FACTOR, 4)[0])
    return strategy
def AssignAdd(data, value, target=utils.CCE):
    """
    Computes data + value elementwise.
    Note:
        Only supports broadcast on input tensor value.
    Args:
        data (tvm.tensor.Tensor): Data tensor.
        value (tvm.tensor.Tensor): Value tensor, broadcast is allowed.
    Returns:
        res: assign add result, tvm.tensor.Tensor, with same type and shape as input tensor data.
        attrs: dict with the inplace binding info.
    """
    input_shape = [x.value for x in data.shape]
    value_shape = [x.value for x in value.shape]
    if len(input_shape) < len(value_shape):
        raise RuntimeError("Do not support broadcast on input tensor data!")
    for i in range(len(value_shape)):
        if input_shape[len(input_shape) - i - 1] < value_shape[len(value_shape) - i - 1]:
            raise RuntimeError("Only support broadcast on input tensor value!")
    # broadcast adds an extra compute and stage, avoid it by checking the shapes beforehand
    if len(value_shape) < len(input_shape) or value_shape != input_shape:
        broadcasted_value = akg.topi.broadcast_to(value, input_shape)
        res = akg.lang.ascend.vadd(data, broadcasted_value)
    else:
        res = akg.lang.ascend.vadd(data, value)
    res, binds_info = TensorUtils.inplace_set(data, res)
    attrs = {utils.BINDS: binds_info}
    return res, attrs
def sliceeven(input, target="cce"):
    """
    Keep the elements at even indices and zero out the rest.
    Note:
        If the index is even, the element at this index is kept, otherwise it is set to 0.
    Args:
        input (tvm.tensor.Tensor): Tensor of type float16, float32, must be a 1D tensor.
    Returns:
        tvm.tensor.Tensor, has same type and shape as input.
    """
    dtype = input.dtype
    shape = [x.value for x in input.shape]
    check_list = ["float16", "float32"]
    if dtype not in check_list:
        raise RuntimeError("sliceeven_cce only support %s while dtype is %s" % (",".join(check_list), dtype))
    utils.check_shape(shape)
    assert len(shape) == 1
    res = akg.tvm.compute(shape, lambda i: akg.tvm.if_then_else(
        i % 2 == 0,
        input[i], akg.tvm.const(0, input.dtype)))
    return res
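A NumPy sketch of the same element-wise rule, useful as a reference when testing the kernel; sliceeven_np is a hypothetical name:

import numpy as np

def sliceeven_np(data):
    # keep elements at even indices, zero out the rest (mirrors the compute rule above)
    out = np.zeros_like(data)
    out[::2] = data[::2]
    return out

print(sliceeven_np(np.arange(1, 9, dtype=np.float32)))
# [1. 0. 3. 0. 5. 0. 7. 0.]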
def conv_filter_ad_tensor(data, fmap_shape, filter_shape, pad_, stride_, dilation_, attrs=None):
    """Wrapper of the convolution filter backprop func."""
    data_list = tvm_array_to_list(data)
    fmap_shape = expr_to_int(fmap_shape)
    filter_shape = expr_to_int(filter_shape)
    pad_ = expr_to_int(pad_)
    stride_ = expr_to_int(stride_)
    dilation_ = expr_to_int(dilation_)
    c, _ = ConvBackpropFilter(data_list, fmap_shape, filter_shape, pad_, stride_, dilation_, attrs=attrs)
    return c
def ConvFilterAd(filter_ad_inputs, fmap_shape, filter_shape,
                 pad_, stride_, dilation_, attrs=None, target=utils.CCE):
    """
    Compute dw according to "conv forward".
    Args:
        filter_ad_inputs (list[tvm.tensor.Tensor]): list with length 2.
            data[0] (considered as dy): Tensor of type float16, shape 5D (out_n, out_c//C0, out_h, out_w, C0).
            data[1] (considered as x): Tensor of type float16, shape 5D (fN, fC//C0, fH, fW, C0).
        fmap_shape (list): [fN, fC, fH, fW].
        filter_shape (list): [wN, wC, wH, wW].
        pad_ (list): [pad_left, pad_right, pad_top, pad_bottom].
        stride_ (list): [stride_h, stride_w].
        dilation_ (list): [dilation_h, dilation_w].
        attrs (dict): a dict with keys like conv_tile and bypass.
    Returns:
        tvm.tensor.Tensor, configs.
    Supported Platforms:
        'Ascend'
    """
    backward_dy, forward_x = filter_ad_inputs
    block_size = 16
    _, in_c, _, _ = fmap_shape
    cout, _, w_h, w_w = filter_shape
    in_c = (in_c + block_size - 1) // block_size * block_size
    cout = (cout + block_size - 1) // block_size * block_size
    w_fractal_shape = ((in_c // block_size) * w_h * w_w, cout // block_size, block_size, block_size)
    forward_w = akg.tvm.placeholder(w_fractal_shape, forward_x.dtype, "input_W")
    original_filter_shape = akg.tvm.placeholder(filter_shape, forward_x.dtype, "input_filter")
    forward_output, _ = Conv([forward_x, forward_w], fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias=False, attrs=attrs)
    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 0}
    jacs = list(akg.differentiate(forward_output, [forward_w], backward_dy, ad_attrs,
                                  [backward_dy, forward_x, original_filter_shape]))
    configs = conv_filter_ad_config([backward_dy, forward_x], fmap_shape, filter_shape, pad_, stride_, dilation_, attrs=attrs)
    return jacs[0], configs
def BiasAddAdV2(head, input_shape, data_format, target=utils.CCE):
    """Compute the gradient for the bias_add operator using automatic differentiation."""
    check_list = ["NHWC", "NC1HWC0", "DefaultFormat"]
    if data_format not in check_list:
        raise RuntimeError("bias_add_grad only support %s while dataformat is %s" % (",".join(check_list), data_format))
    head_plh = akg.tvm.placeholder(head.shape, head.dtype, "head_plh")
    if data_format == "NC1HWC0":
        bias_shape = (1, head.shape[1], 1, 1, head.shape[4])
        bias_plh = akg.tvm.placeholder(bias_shape, head.dtype, "bias_plh")
    elif data_format == "NHWC":
        bias_shape = (input_shape[-1],)
        bias_plh = akg.tvm.placeholder(bias_shape, head.dtype, "bias_plh")
    else:
        bias_shape = (input_shape[1],)
        bias_plh = akg.tvm.placeholder(bias_shape, head.dtype, "bias_plh")
    bias_add_res = BiasAdd(head_plh, bias_plh, data_format)
    shape1 = [x.value for x in head_plh.shape]
    shape2 = [x.value for x in bias_plh.shape]

    def custom_bias_add_diff(out, input_data, head, ad_attrs, new_pld_array):
        if len(shape2) != 1:
            raise RuntimeError("Default Format needs Bias to be a 1D Tensor!")
        if data_format == "NHWC":
            return [akg.tvm.compute(shape2, lambda l: head[0, 0, 0, l])]
        if data_format == "DefaultFormat":
            if len(shape1) == 2:
                return [akg.tvm.compute(shape2, lambda l: head[0, l])]
            if len(shape1) == 4:
                return [akg.tvm.compute(shape2, lambda l: head[0, l, 0, 0])]
            raise RuntimeError("bias_add only support 2D and 4D shape while dataformat is DefaultFormat")
        return None

    if data_format == "NC1HWC0":
        jacs = list(akg.differentiate(bias_add_res, [bias_plh], head))
    else:
        variables = akg.get_variables("reshape_diff")
        jacs = list(akg.differentiate(bias_add_res, [bias_plh], head, None, None,
                                      override={variables[0]: (variables[1], custom_bias_add_diff)}))
    return jacs[0]
def validate_and_normalize_path(
        path,
        check_absolute_path=False,
        allow_parent_dir=True,
):
    """
    Validates path and returns its normalized form.
    If path has a valid scheme, treat path as a url, otherwise consider path a
    unix local path.
    Note:
        File scheme (rfc8089) is currently not supported.
    Args:
        path (str): Path to be normalized.
        check_absolute_path (bool): Whether to require the path to be absolute.
        allow_parent_dir (bool): Whether to allow parent dir ("..") components in the path.
    Returns:
        str, normalized path.
    """
    if not path:
        raise RuntimeError("The path is invalid!")
    path_str = str(path)
    if not allow_parent_dir:
        path_components = path_str.split("/")
        if ".." in path_components:
            raise RuntimeError("The parent path is not allowed!")
    # path does not have a valid scheme, treat it as a unix local path.
    if check_absolute_path:
        if not path_str.startswith("/"):
            raise RuntimeError("The path is invalid!")
    try:
        # most unix systems allow
        normalized_path = os.path.realpath(path)
    except ValueError:
        raise RuntimeError("The path is invalid!")
    return normalized_path
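A hedged usage sketch; the paths below are hypothetical:

# relaxed use: relative path is resolved, ".." components are allowed by default
p = validate_and_normalize_path("./profiler_output/hwts.log")
# strict use: require an absolute path and forbid ".." components
p = validate_and_normalize_path("/var/log/profiler/hwts.log",
                                check_absolute_path=True,
                                allow_parent_dir=False)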
def _get_source_file(self):
    """Get the hwts log file name, which was created by the ada service."""
    file_name = get_file_join_name(self._input_path, self._source_file_target)
    if not file_name:
        file_name = get_file_join_name(self._input_path, self._source_file_target_old)
        if not file_name:
            data_path = os.path.join(self._input_path, "data")
            file_name = get_file_join_name(data_path, self._source_file_target)
            if not file_name:
                file_name = get_file_join_name(data_path, self._source_file_target_old)
    if not file_name:
        msg = "Failed to find the hwts log file under the profiling directory"
        raise RuntimeError(msg)
    return file_name
def execute(self):
    """
    Execute the parser, get result data, and write it to the output file.
    Returns:
        int, the accumulated syscnt cycles for the kernel, or max_time_consume if none were recorded.
    """
    content_format = ['QIIIIIIIIIIII', 'QIIQIIIIIIII', 'IIIIQIIIIIIII']
    log_type = ['Start of task', 'End of task', 'Start of block', 'End of block', 'Block PMU']
    result_data = ""
    self._source_flie_name = validate_and_normalize_path(self._source_flie_name)
    last_syscnt = 0
    cycles = 0
    kernel_label = tvm.get_global_func("ascend_get_kernel_label")()
    with open(self._source_flie_name, 'rb') as hwts_data:
        while True:
            # read one 64-byte record
            line = hwts_data.read(64)
            if line:
                if not line.strip():
                    continue
            else:
                break
            byte_first_four = struct.unpack('BBHHH', line[0:8])
            # byte_first[0:4] refers to count. byte_first[4] refers to is_warn_res0_0v.
            # byte_first[5:8] refers to the type of ms.
            byte_first = bin(byte_first_four[0]).replace('0b', '').zfill(8)
            ms_type = byte_first[-3:]
            is_warn_res0_ov = byte_first[4]
            cnt = int(byte_first[0:4], 2)
            core_id = byte_first_four[1]
            blk_id, task_id = byte_first_four[3], byte_first_four[4]
            if ms_type in ['000', '001', '010']:  # log type 0,1,2
                result = struct.unpack(content_format[0], line[8:])
                syscnt = result[0]
                stream_id = result[1]
            elif ms_type == '011':  # log type 3
                result = struct.unpack(content_format[1], line[8:])
                syscnt = result[0]
                stream_id = result[1]
            elif ms_type == '100':  # log type 4
                result = struct.unpack(content_format[2], line[8:])
                stream_id = result[2]
                if is_warn_res0_ov == '0':
                    syscnt = result[4]
                else:
                    syscnt = None
            else:
                logger.info("Profiling: invalid hwts log record type %s", ms_type)
                continue
            if int(task_id) < 25000:
                task_id = str(task_id)
            if kernel_label == (str(stream_id) + '_' + str(task_id)):
                if log_type[int(ms_type, 2)] == "Start of task":
                    last_syscnt = syscnt
                elif log_type[int(ms_type, 2)] == "End of task":
                    cycles += syscnt - last_syscnt
            if self._is_print:
                result_data += ("%-14s %-4s %-8s %-9s %-8s %-15s %s\n" % (log_type[int(ms_type, 2)], cnt, core_id,
                                                                          blk_id, task_id, syscnt, stream_id))
    if self._is_print:
        fwrite_format(self._output_filename, data_source=self._dst_file_title, is_start=True)
        fwrite_format(self._output_filename, data_source=self._dst_file_column_title)
        fwrite_format(self._output_filename, data_source=result_data)
    return cycles if cycles != 0 else max_time_consume
def logsoftmax(inputs, axis, target="cce"):
    """
    Activation function, computes log softmax.
    Args:
        inputs: Tensor.
        axis: On which dimension log softmax is performed.
    Return:
        Tensor, which has the same shape and type as input.
    """
    dtype = inputs.dtype
    utils.check_shape(inputs.shape)
    utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)
    axis = refine_reduce_axis(inputs, axis)
    if isinstance(axis, (list, tuple)):
        if len(axis) != 1:
            raise RuntimeError("Reduce axis for logsoftmax op must be 1-dimension, while current is %d-dimension"
                               % (len(axis)))
        axis = axis[0]
    out = logsoftmax_op(inputs, inputs.shape, axis)
    attr_map = {"pragma_modshift": 1, "disable_cse": 1}
    return out, attr_map
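logsoftmax_op is defined elsewhere; for checking the kernel's output, a numerically stable NumPy reference of log softmax (logsoftmax_np is a hypothetical name):

import numpy as np

def logsoftmax_np(x, axis=-1):
    # stable reference: log_softmax(x) = (x - max) - log(sum(exp(x - max)))
    x_max = np.max(x, axis=axis, keepdims=True)
    shifted = x - x_max
    return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))

x = np.random.randn(2, 5).astype(np.float32)
print(np.allclose(np.sum(np.exp(logsoftmax_np(x)), axis=-1), 1.0, atol=1e-5))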
def _load_lib():
    """Load the library by searching possible paths."""
    lib_path = []
    pwd = os.path.dirname(os.path.realpath(__file__))
    paths = [os.path.realpath(pwd + "/../../lib"), os.path.realpath(pwd + "/../../../../../mindspore/lib")]
    tar_so = "libakg.so"
    for path in paths:
        found_lib = False
        if os.path.exists(path):
            files = os.listdir(path)
            for f in files:
                if f == tar_so:
                    lib_path.append(path + "/" + f)
                    found_lib = True
                    break
        if found_lib:
            break
    if not lib_path:
        lib_path = libinfo.find_lib_path()
    if not lib_path:
        raise RuntimeError("Cannot find library {}.".format(tar_so))
    lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL)
    return lib, os.path.basename(lib_path[0])
def discontinous_mov(data, out_shape, target=utils.CCE):
    """
    Copy the elements at even indices of the original data into a tensor whose last dimension
    is half of the original length, duplicated along the first dimension of out_shape.
    Args:
        data (tvm.tensor.Tensor): Tensor of type float16, float32.
        out_shape (list): a list of output's shape.
    Returns:
        tvm.tensor.Tensor, has the same type as data, but its shape changes to out_shape instead of data's shape.
    Example:
        if data = [1,2,3,4,5,6,7,8,9,10] then the output = [[1,3,5,7,9],[1,3,5,7,9]].
    """
    # check types
    utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.ALL_FLOAT)
    shape = [x.value for x in data.shape]
    utils.check_shape(shape)
    output = akg.tvm.compute(out_shape, lambda j, i: data[i * 2], name="output")
    return output
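A NumPy sketch of the same gather rule for a 2D out_shape, matching the docstring example; discontinous_mov_np is a hypothetical name:

import numpy as np

def discontinous_mov_np(data, out_shape):
    # gather data[i * 2] for every output column, duplicated across the rows of out_shape
    rows, cols = out_shape
    return np.tile(data[:2 * cols:2], (rows, 1))

print(discontinous_mov_np(np.arange(1, 11, dtype=np.float32), (2, 5)))
# [[1. 3. 5. 7. 9.]
#  [1. 3. 5. 7. 9.]]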
def apply_proximal_adagrad_run(shape, dtype, attrs=None):
    """Run function for the dsl function apply_proximal_adagrad."""
    scalar_shape = (1,)
    var_shape, accum_shape, grad_shape = [shape] * 3
    lr_shape, l1_shape, l2_shape = [scalar_shape] * 3
    shapes = [var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape]
    dtypes = [dtype] * 6
    mod = utils.op_build_test(apply_proximal_adagrad, shapes, dtypes,
                              kernel_name='apply_proximal_adagrad', attrs=attrs)
    expects, (var, accum, lr, l1, l2, grad) = gen_data(dtype, shape)
    outputs = utils.mod_launch(mod, (var, accum, lr, l1, l2, grad), outputs=(0, 1))
    rtol, atol = get_rtol_atol("apply_proximal_adagrad", dtype)
    compare_result = list(map(lambda x, y: compare_tensor(x, y, rtol=rtol, atol=atol), outputs, expects))
    inputs = (var, accum, lr, l1, l2, grad)
    return inputs, outputs, expects, all(compare_result)
def gen_data(dtype, shape):
    """Generate data for testing the op."""
    # tensors
    var = random_gaussian(shape).astype(dtype)
    accum = np.abs(random_gaussian(shape).astype(dtype))
    grad = random_gaussian(shape).astype(dtype)
    # scalars
    scalar_shape = (1,)
    lr = np.random.random_sample(scalar_shape).astype(dtype)
    l1 = np.random.randn(*scalar_shape).astype(dtype)
    l2 = np.random.random_sample(scalar_shape).astype(dtype)
    input_data = (var, accum, lr, l1, l2, grad)
    expect = _apply_proximal_adagrad_compute(var, accum, lr, l1, l2, grad)
    return expect, input_data
def acosh_grad_run(shape, dtype, attrs):
    """Run function for the dsl function acosh_grad."""
    shapes = [shape, shape]
    dtypes = [dtype, dtype]
    mod = utils.op_build_test(AcoshGrad, shapes, dtypes,
                              kernel_name="acosh_grad", attrs=attrs)
    bench_mark, inputs, output = gen_data(dtype, shape)
    output = utils.mod_launch(mod, inputs + [output], expect=bench_mark)
    rtol, atol = get_rtol_atol("acosh_grad", dtype)
    compare_res = compare_tensor(output, bench_mark, rtol=rtol, atol=atol)
    return inputs, output, bench_mark, compare_res
def gen_data(dtype, shape):
    """Generate data for testing the op."""
    y = random_gaussian(size=shape).astype(dtype)
    if dtype == "float16":
        # If the value of y is too small, there will be some overflow
        lower_bound = 1e-3
        y = np.select([y >= 0, y < 0], [np.maximum(y, lower_bound), np.minimum(y, -lower_bound)])
    dy = random_gaussian(size=shape).astype(dtype)
    expect = _acosh_grad_compute(y, dy)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, [y, dy], output
def lstmcell(inputs, hx, cx, w_ih, w_hh, b_ih, b_hh, use_bias=True, target="cce"):
    """
    Computes the hidden and state variables of a Long Short Term Memory (lstm) cell.
    Args:
        inputs: akg.tvm.Tensor of type float16, float32 with shape [batch, input_size].
        hx: akg.tvm.Tensor for hidden variable from previous cell with shape [batch, hidden_size].
        cx: akg.tvm.Tensor for state variable from previous cell with shape [batch, hidden_size].
        w_ih: akg.tvm.Tensor for input weights with shape [4*hidden_size, input_size].
        w_hh: akg.tvm.Tensor for hidden weights with shape [4*hidden_size, hidden_size].
        b_ih: akg.tvm.Tensor for input bias with shape [4*hidden_size].
        b_hh: akg.tvm.Tensor for hidden bias with shape [4*hidden_size].
    Returns:
        hy: akg.tvm.Tensor for hidden variable of current cell.
        cy: akg.tvm.Tensor for state variable of current cell.
    """
    w_i_ih, w_f_ih, w_c_ih, w_o_ih = Split(w_ih, 4, 0)
    b_i_ih, b_f_ih, b_c_ih, b_o_ih = Split(b_ih, 4)
    w_i_hh, w_f_hh, w_c_hh, w_o_hh = Split(w_hh, 4, 0)
    b_i_hh, b_f_hh, b_c_hh, b_o_hh = Split(b_hh, 4)
    # gates: [batch, 4*hidden_size], ih*wh+bias
    # ingate, forgetgate, cellgate, outgate = split(gates, 4, 1)
    i = dense(inputs, w_i_ih, b_i_ih, use_bias) + dense(hx, w_i_hh, b_i_hh, use_bias)
    f = dense(inputs, w_f_ih, b_f_ih, use_bias) + dense(hx, w_f_hh, b_f_hh, use_bias)
    c = dense(inputs, w_c_ih, b_c_ih, use_bias) + dense(hx, w_c_hh, b_c_hh, use_bias)
    o = dense(inputs, w_o_ih, b_o_ih, use_bias) + dense(hx, w_o_hh, b_o_hh, use_bias)
    cy = (sigmoid(f) * cx) + (sigmoid(i) * Tanh(c))
    hy = sigmoid(o) * Tanh(cy)
    return hy, cy
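A minimal NumPy sketch of the same gate math, assuming dense(x, w, b) computes x @ w.T + b for weights of shape [4*hidden_size, ...]; sigmoid_np and lstmcell_np are hypothetical helper names:

import numpy as np

def sigmoid_np(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstmcell_np(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
    # split the stacked weights/biases into the i/f/c/o blocks, run the four
    # dense layers, then combine with the previous cell state as above
    w_i_ih, w_f_ih, w_c_ih, w_o_ih = np.split(w_ih, 4, axis=0)
    w_i_hh, w_f_hh, w_c_hh, w_o_hh = np.split(w_hh, 4, axis=0)
    b_i_ih, b_f_ih, b_c_ih, b_o_ih = np.split(b_ih, 4)
    b_i_hh, b_f_hh, b_c_hh, b_o_hh = np.split(b_hh, 4)
    i = x @ w_i_ih.T + b_i_ih + hx @ w_i_hh.T + b_i_hh
    f = x @ w_f_ih.T + b_f_ih + hx @ w_f_hh.T + b_f_hh
    c = x @ w_c_ih.T + b_c_ih + hx @ w_c_hh.T + b_c_hh
    o = x @ w_o_ih.T + b_o_ih + hx @ w_o_hh.T + b_o_hh
    cy = sigmoid_np(f) * cx + sigmoid_np(i) * np.tanh(c)
    hy = sigmoid_np(o) * np.tanh(cy)
    return hy, cy

# e.g. with batch=2, input_size=3, hidden_size=4:
# hy, cy = lstmcell_np(np.zeros((2, 3)), np.zeros((2, 4)), np.zeros((2, 4)),
#                      np.zeros((16, 3)), np.zeros((16, 4)), np.zeros(16), np.zeros(16))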
def rnn_tanh_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh, use_bias=True, target="cce"):
    """
    RNN cell with tanh non-linearity.
    Args:
        inputs: akg.tvm.Tensor of type float16, float32.
        hidden: akg.tvm.Tensor for hidden variable from previous cell.
        w_ih: akg.tvm.Tensor for input weights.
        w_hh: akg.tvm.Tensor for hidden weights.
        b_ih: akg.tvm.Tensor for input bias.
        b_hh: akg.tvm.Tensor for hidden bias.
    Returns:
        h: akg.tvm.Tensor for hidden output variable of current cell.
    """
    igates = dense(inputs, w_ih, b_ih, use_bias)
    hgates = dense(hidden, w_hh, b_hh, use_bias)
    h = Tanh(igates + hgates)
    return h
def rnn_relu_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh, use_bias=True, target="cce"):
    """
    RNN cell with relu (relu6) non-linearity.
    Args:
        inputs: akg.tvm.Tensor of type float16, float32.
        hidden: akg.tvm.Tensor for hidden variable from previous cell.
        w_ih: akg.tvm.Tensor for input weights.
        w_hh: akg.tvm.Tensor for hidden weights.
        b_ih: akg.tvm.Tensor for input bias.
        b_hh: akg.tvm.Tensor for hidden bias.
    Returns:
        h: akg.tvm.Tensor for hidden output variable of current cell.
    """
    igates = dense(inputs, w_ih, b_ih, use_bias)
    hgates = dense(hidden, w_hh, b_hh, use_bias)
    h = relu6(igates + hgates)
    return h
def apply_ada_max(var, m, v, grad, lr, beta1, beta1_power, beta2, epsilon, target=utils.CCE):
    """
    Update var according to the AdaMax algorithm.
    m_t <- beta1 * m_{t-1} + (1 - beta1) * g
    v_t <- max(beta2 * v_{t-1}, abs(g))
    variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
    Args:
        var (tvm.tensor.Tensor): The tensor to be updated. Should be float32.
        m (tvm.tensor.Tensor): A tensor of same shape and type as var.
        v (tvm.tensor.Tensor): A tensor of same shape and type as var.
        grad (tvm.tensor.Tensor): A tensor of same shape and type as var.
        lr (tvm.tensor.Tensor): Learning rate, a scalar tensor of same type as var.
        beta1 (tvm.tensor.Tensor): A scalar tensor of same type as var, 0.0 <= beta1 <= 1.0.
        beta1_power (tvm.tensor.Tensor): The value of :math:`beta1^t`, a scalar tensor of same type as var.
        beta2 (tvm.tensor.Tensor): A scalar tensor of same type as var, 0.0 <= beta2 <= 1.0.
        epsilon (float): A small value to prevent division by 0.
    Returns:
        tvm.tensor.Tensor, Updated var.
        tvm.tensor.Tensor, Updated m.
        tvm.tensor.Tensor, Updated v.
    """
    _check_inputs(var, m, v, grad, lr, beta1, beta1_power, beta2, epsilon)
    out_var, out_m, out_v = _apply_ada_max_compute(var, m, v, grad, lr, beta1, beta1_power, beta2, epsilon)
    # reuse var, m and v
    out_var, binds_info = TensorUtils.inplace_set(var, out_var, "var_buf")
    out_m, binds_info2 = TensorUtils.inplace_set(m, out_m, "m_buf")
    out_v, binds_info3 = TensorUtils.inplace_set(v, out_v, "v_buf")
    binds_info.update(binds_info2)
    binds_info.update(binds_info3)
    attrs = {utils.BINDS: binds_info}
    return out_var, out_m, out_v, attrs
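A NumPy transcription of the update rules quoted in the docstring, useful as a reference when validating _apply_ada_max_compute; ada_max_np is a hypothetical name:

import numpy as np

def ada_max_np(var, m, v, grad, lr, beta1, beta1_power, beta2, epsilon):
    # straight transcription of the three update rules above
    m_new = beta1 * m + (1.0 - beta1) * grad
    v_new = np.maximum(beta2 * v, np.abs(grad))
    var_new = var - lr / (1.0 - beta1_power) * m_new / (v_new + epsilon)
    return var_new, m_new, v_new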
def replace_io(body, rmap):
    """Replace tensor usage according to the given dict."""
    from .. import ir_pass

    def replace(op):
        if isinstance(op, _stmt.Provide) and op.func in rmap.keys():
            buf = rmap[op.func]
            return _make.Provide(buf.op, buf.value_index, op.value, op.args)
        if isinstance(op, _expr.Call) and op.func in rmap.keys():
            buf = rmap[op.func]
            return _make.Call(buf.dtype, buf.name, op.args,
                              _expr.Call.Halide, buf.op, buf.value_index)
        return None

    return ir_pass.IRTransform(body, None, replace, ['Provide', 'Call'])
def RecPositive(x, target=utils.CCE):
    """
    Calculate 1/x when the data in x are all positive, used by dsl tanh and focalloss_grad.
    Args:
        x (tvm.tensor.Tensor): Tensor of type float16, float32. Data in x must be positive.
    Returns:
        tvm.tensor.Tensor, the same type as the input.
    Supported Platforms:
        'Ascend'
    """
    utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)
    need_conv = product_is_mini() and x.dtype == "float32"
    x_fp16 = x
    if need_conv:
        x_fp16 = x.astype("float16")
    log = akg.topi.log(x_fp16)
    neg_log = akg.topi.negative(log)
    res = akg.lang.ascend.vexp(neg_log)
    return res.astype(x.dtype) if need_conv else res
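The kernel relies on the identity 1/x = exp(-ln x), which only holds for positive x; a quick NumPy check:

import numpy as np

x = np.abs(np.random.randn(4).astype(np.float32)) + 0.1  # strictly positive input
rec = np.exp(-np.log(x))          # the exp(-ln x) identity used above
print(np.allclose(rec, 1.0 / x, rtol=1e-5))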
def _newton(start_value, num_to_vrsqrt):
    """Do Newton's method to calculate vrsqrt."""
    x0_square = topi.multiply(start_value, start_value)
    mul_res = topi.multiply(x0_square, num_to_vrsqrt)
    mul_res = topi.multiply(mul_res, tvm.const(-1, "float32"))
    head0_tmp = topi.add(mul_res, tvm.const(3, "float32"))
    head0 = topi.multiply(head0_tmp, start_value)
    newton_res = topi.multiply(head0, tvm.const(0.5, "float32"))
    return newton_res
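The function above computes one Newton step y_new = 0.5 * y * (3 - x * y^2) toward 1/sqrt(x); a NumPy sketch in which a hypothetical rough estimate stands in for the hardware's vrsqrt starting value:

import numpy as np

x = np.array([0.25, 1.0, 4.0, 9.0], dtype=np.float32)
y0 = (1.0 / np.sqrt(x)) * 1.05           # stand-in for the rough hardware vrsqrt estimate
y1 = 0.5 * y0 * (3.0 - x * y0 * y0)      # the step computed by _newton above
print(np.abs(y1 - 1.0 / np.sqrt(x)))     # error shrinks from ~5% to well under 1% after one step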
def floormod(shape, dtype, kernel_name, attrs, target="cce"):
    """
    Compute the element-wise remainder of division: res = a - floor(a/b) * b.
    Args:
        shape (list): shape of the two input tensors.
        dtype (str): the inputs' data type.
        kernel_name (str): the kernel name.
        attrs: build attributes, default None.
    Returns:
        the built module, computing a - floor(a/b) * b for inputs of the given shape and dtype.
    """
    utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])
    utils.check_shape(shape)
    a = akg.tvm.placeholder(shape=shape, name="a", dtype=dtype)
    b = akg.tvm.placeholder(shape=shape, name="b", dtype=dtype)
    # res = a - floor(a/b) * b
    # Newton's method refines the vrec (reciprocal) estimate of b
    para = akg.lang.ascend.vrec(b)
    for _ in range(3):
        tmp1 = akg.lang.ascend.vmul(b, para)
        tmp2 = akg.lang.ascend.vmuls(tmp1, -1)
        tmp3 = akg.lang.ascend.vadds(tmp2, 2)
        para = akg.lang.ascend.vmul(tmp3, para)
    c = akg.lang.ascend.vmul(a, para)
    d = akg.lang.ascend.floor(c)
    e = akg.lang.ascend.vmul(d, b)
    res = akg.lang.ascend.vsub(a, e)
    s = akg.tvm.create_schedule(res.op)
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [a, b, res], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
    return mod
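A NumPy reference of the formula the kernel implements, convenient for checking results:

import numpy as np

a = np.array([5.0, -5.0, 5.5], dtype=np.float32)
b = np.array([3.0, 3.0, -2.0], dtype=np.float32)
res = a - np.floor(a / b) * b   # the formula the kernel implements
print(res)                      # [ 2.   1.  -0.5], matches np.mod(a, b)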
def proposal_sort_tiling_strategy(tensor, tensor_shape):
    """Custom tiling strategy for the proposal_sort op."""
    strategy = list()
    for i, sh in enumerate(tensor_shape):
        if i == 0 and sh > 1:
            strategy.append(ct_util.create_constraint_on_tensor(tensor=tensor,
                                                                values=1,
                                                                constraints=ct_util.TileConstraint.FACTOR,
                                                                tensor_pos=0)[0])
        if i == 1 and sh > 4096:
            strategy.append(ct_util.create_constraint_on_tensor(tensor=tensor,
                                                                values=1,
                                                                constraints=ct_util.TileConstraint.FACTOR,
                                                                tensor_pos=2)[0])
    return strategy
Python | def proposal_sort(data, topk, target="cce"):
"""
Computes the k largest entries from input.
Args:
data: akg.tvm.Tensor of type float16, float32.
topk: an integer indicating the top kth entries.
Returns:
sorted_data: akg.tvm.Tensor of top kth number of rows.
attr_map: optional parameter for setting tiling strategy.
"""
utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.FLOAT16)
bs, box_num, _ = data.shape
result_shape = (bs, topk, data.shape[-1])
attr_map = {}
if int(box_num) > 4096:
reducer = akg.tvm.comm_reducer(lambda x, y: dav.topk_sort(x, y, akg.tvm.const(topk, 'uint16')),
lambda t: akg.tvm.const(0, dtype=t), name="cor_reducer")
k = akg.tvm.reduce_axis((0, box_num), name='k')
sorted_data = akg.tvm.compute(result_shape, lambda bs, i, j: reducer(data[bs, k, j], axis=k), name="sort")
else:
reducer = akg.tvm.comm_reducer(lambda x, y: dav.proposal_sort(x, y, akg.tvm.const(topk, 'uint16')),
lambda t: akg.tvm.const(0, dtype=t), name="cor_reducer")
k = akg.tvm.reduce_axis((0, box_num), name='k')
sorted_data = akg.tvm.compute(result_shape,
lambda bs, i, j: reducer(data[bs, k, j], axis=k),
name="proposal_sort_output")
attr_map["custom_tiling"] = proposal_sort_tiling_strategy(sorted_data, get_shape(data))
return sorted_data, attr_map | def proposal_sort(data, topk, target="cce"):
"""
Computes the k largest entries from input.
Args:
data: akg.tvm.Tensor of type float16.
topk: an integer indicating how many of the largest entries to keep.
Returns:
sorted_data: akg.tvm.Tensor containing the top k rows.
attr_map: optional parameter for setting tiling strategy.
"""
utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.FLOAT16)
bs, box_num, _ = data.shape
result_shape = (bs, topk, data.shape[-1])
attr_map = {}
if int(box_num) > 4096:
reducer = akg.tvm.comm_reducer(lambda x, y: dav.topk_sort(x, y, akg.tvm.const(topk, 'uint16')),
lambda t: akg.tvm.const(0, dtype=t), name="cor_reducer")
k = akg.tvm.reduce_axis((0, box_num), name='k')
sorted_data = akg.tvm.compute(result_shape, lambda bs, i, j: reducer(data[bs, k, j], axis=k), name="sort")
else:
reducer = akg.tvm.comm_reducer(lambda x, y: dav.proposal_sort(x, y, akg.tvm.const(topk, 'uint16')),
lambda t: akg.tvm.const(0, dtype=t), name="cor_reducer")
k = akg.tvm.reduce_axis((0, box_num), name='k')
sorted_data = akg.tvm.compute(result_shape,
lambda bs, i, j: reducer(data[bs, k, j], axis=k),
name="proposal_sort_output")
attr_map["custom_tiling"] = proposal_sort_tiling_strategy(sorted_data, get_shape(data))
return sorted_data, attr_map |
Python | def preprocess_position(position):
"""check position's value is valid and turn integer position into list"""
if isinstance(position, (list, tuple)):
for p in position:
if not isinstance(p, int):
raise TypeError("Position of tensor should be a integer")
elif isinstance(position, int):
position = [position]
else:
raise TypeError(
"Position of tensor should be a integer, list or a tuple")
return position | def preprocess_position(position):
"""check position's value is valid and turn integer position into list"""
if isinstance(position, (list, tuple)):
for p in position:
if not isinstance(p, int):
raise TypeError("Position of tensor should be a integer")
elif isinstance(position, int):
position = [position]
else:
raise TypeError(
"Position of tensor should be a integer, list or a tuple")
return position |
Python | def preprocess_value_with_position(values, position):
"""check value is valid and compatible with position, and turn integer into list"""
if isinstance(values, (list, tuple)):
if len(values) != len(position):
raise ValueError(
"Length of values is not compatible with position.")
for l in values:
if not isinstance(l, int):
raise TypeError(
"Dynamic shape values of tensor should be an integer or a list/tuple of integers")
elif isinstance(values, int):
values = [values]
else:
raise TypeError(
"Dynamic shape values of tensor should be an integer or a list/tuple of integers")
return values | def preprocess_value_with_position(values, position):
"""check value is valid and compatible with position, and turn integer into list"""
if isinstance(values, (list, tuple)):
if len(values) != len(position):
raise ValueError(
"Length of values is not compatible with position.")
for l in values:
if not isinstance(l, int):
raise TypeError(
"Dynamic shape values of tensor should be an integer or a list/tuple of integers")
elif isinstance(values, int):
values = [values]
else:
raise TypeError(
"Dynamic shape values of tensor should be an integer or a list/tuple of integers")
return values |
Python | def _newton_iter(data, init_x):
"""Do element-wise Newton compute."""
# Newton begin:x(n+1) = x(n)*(3-a*x(n)^2)/2
init_square = topi.multiply(init_x, init_x)
newton_res = topi.multiply(init_square, data)
newton_res = topi.multiply(newton_res, neg_one_const("float32"))
newton_res = topi.add(newton_res, tvm.const(3, "float32"))
newton_res = topi.multiply(newton_res, init_x)
newton_res = topi.multiply(newton_res, tvm.const(0.5, "float32"))
return newton_res | def _newton_iter(data, init_x):
"""Do element-wise Newton compute."""
# Newton begin:x(n+1) = x(n)*(3-a*x(n)^2)/2
init_square = topi.multiply(init_x, init_x)
newton_res = topi.multiply(init_square, data)
newton_res = topi.multiply(newton_res, neg_one_const("float32"))
newton_res = topi.add(newton_res, tvm.const(3, "float32"))
newton_res = topi.multiply(newton_res, init_x)
newton_res = topi.multiply(newton_res, tvm.const(0.5, "float32"))
return newton_res |
Python | def _sqrt(data):
"""Calculate sqrt by using three times newton iteration(Mini) or vsqrt(Cloud)."""
if product_is_mini():
data_sqrt = topi.rsqrt(data)
data_sqrt = _newton_iter(data, data_sqrt)
data_sqrt = _newton_iter(data, data_sqrt)
data_sqrt = _newton_iter(data, data_sqrt)
return topi.multiply(data, data_sqrt)
else:
return topi.sqrt(data) | def _sqrt(data):
"""Calculate sqrt by using three times newton iteration(Mini) or vsqrt(Cloud)."""
if product_is_mini():
data_sqrt = topi.rsqrt(data)
data_sqrt = _newton_iter(data, data_sqrt)
data_sqrt = _newton_iter(data, data_sqrt)
data_sqrt = _newton_iter(data, data_sqrt)
return topi.multiply(data, data_sqrt)
else:
return topi.sqrt(data) |
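# Reference sketch for the Newton refinement used by _newton_iter/_sqrt above:
# starting from an rsqrt estimate x0 ~ 1/sqrt(a), each step applies
# x_{n+1} = x_n * (3 - a * x_n^2) / 2, and sqrt(a) is recovered as a * x_n.
# Plain NumPy stands in for the topi calls; the perturbed start mimics a rough rsqrt.
import numpy as np

def sqrt_by_newton(a, steps=3):
    x = (1.0 / np.sqrt(a)) * 1.05        # deliberately rough rsqrt guess
    for _ in range(steps):
        x = x * (3.0 - a * x * x) * 0.5  # same update as _newton_iter
    return a * x                         # sqrt(a) = a * rsqrt(a)

a = np.array([0.25, 2.0, 9.0], dtype=np.float32)
print(sqrt_by_newton(a))                 # close to np.sqrt(a) = [0.5, 1.4142, 3.0]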
Python | def _taylor_compute(data_x, x_square=None):
"""Do arcsinx compute use the 15th order taylor expansion when 0 <= x <= BOUNDARY."""
if x_square is None:
x_square = topi.multiply(data_x, data_x)
else:
x_square = x_square
# asin(x) = x + 1/6*x^3 + 3/40*x^5 + 5/112*x^7 + ... + 13!!/(14!!*15)*x^15
res = topi.multiply(x_square, tvm.const(COEF[TAYLOR_COUNT], "float32"))
for temp in reversed(range(TAYLOR_COUNT)):
res = topi.add(res, tvm.const(COEF[temp], "float32"))
if temp == 0:
res = topi.multiply(res, data_x)
else:
res = topi.multiply(x_square, res)
return res | def _taylor_compute(data_x, x_square=None):
"""Do arcsinx compute use the 15th order taylor expansion when 0 <= x <= BOUNDARY."""
if x_square is None:
x_square = topi.multiply(data_x, data_x)
else:
x_square = x_square
# asin(x) = x + 1/6*x^3 + 3/40*x^5 + 5/112*x^7 + ... + 13!!/(14!!*15)*x^15
res = topi.multiply(x_square, tvm.const(COEF[TAYLOR_COUNT], "float32"))
for temp in reversed(range(TAYLOR_COUNT)):
res = topi.add(res, tvm.const(COEF[temp], "float32"))
if temp == 0:
res = topi.multiply(res, data_x)
else:
res = topi.multiply(x_square, res)
return res |
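# Reference sketch of the Horner-style evaluation in _taylor_compute above:
# asin(x) ~= sum_{k=0..7} (2k-1)!! / ((2k)!! * (2k+1)) * x^(2k+1) for |x| <= sqrt(2)/2.
# The COEF table and TAYLOR_COUNT used by the kernel are assumed to hold these
# eight coefficients; they are rebuilt here from the double-factorial formula.
import numpy as np

def asin_taylor(x, terms=8):
    coef = []
    num, den = 1.0, 1.0                  # running (2k-1)!! and (2k)!!
    for k in range(terms):
        if k == 0:
            coef.append(1.0)
        else:
            num *= 2 * k - 1
            den *= 2 * k
            coef.append(num / (den * (2 * k + 1)))
    x2 = x * x
    res = np.full_like(x, coef[-1])
    for c in reversed(coef[:-1]):        # Horner scheme in x^2
        res = res * x2 + c
    return res * x

x = np.array([0.1, 0.3, 0.5], dtype=np.float32)
print(asin_taylor(x))                    # close to np.arcsin(x)
print(np.arcsin(x))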
Python | def Asin(x, target=utils.CCE):
"""
Computes the trigonometric inverse sine of `x` element-wise.
asin(x) = | arcsin(sqrt(1-x^2)) - HALF_PI, x belongs to [-1, -sqrt(2)/2)
| the 15th order taylor expansion, x belongs to [-sqrt(2)/2, sqrt(2)/2)
| HALF_PI - arcsin(sqrt(1-x^2)), x belongs to [sqrt(2)/2, 1]
Args:
x (tvm.tensor.Tensor): Tensor of type float16, float32.
Returns:
tvm.tensor.Tensor of same type and shape as x.
Supported Platforms:
'Ascend'
"""
utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)
utils.check_shape(x.shape)
return _asin_compute(x, target) | def Asin(x, target=utils.CCE):
"""
Computes the trigonometric inverse sine of `x` element-wise.
asin(x) = | arcsin(sqrt(1-x^2)) - HALF_PI, x belongs to [-1, -sqrt(2)/2)
| the 15th order taylor expansion, x belongs to [-sqrt(2)/2, sqrt(2)/2)
| HALF_PI - arcsin(sqrt(1-x^2)), x belongs to [sqrt(2)/2, 1]
Args:
x (tvm.tensor.Tensor): Tensor of type float16, float32.
Returns:
tvm.tensor.Tensor of same type and shape as x.
Supported Platforms:
'Ascend'
"""
utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)
utils.check_shape(x.shape)
return _asin_compute(x, target) |
Python | def BatchNormAd(head, data, mean, var, gamma, data_format="DefaultFormat", axis=1, eps=1e-3, target=utils.CCE):
"""
Compute gradient for batch normalization operator using automatic differentiation.
Args:
head (tvm.tensor.Tensor): Input tensor.
data (tvm.tensor.Tensor): Input tensor.
mean (tvm.tensor.Tensor): Input tensor.
var (tvm.tensor.Tensor): Input tensor.
gamma (tvm.tensor.Tensor): Input tensor.
data_format (str): Data format of input tensors.
axis (int): specify the channel axis when data_format is "DefaultFormat".
eps (float): small float added to variance to avoid dividing by zero.
Returns:
tvm.tensor.Tensor of same shape and type as head.
Supported Platforms:
'Ascend'
"""
supported_format = ["NCHW", "NHWC", "NC1HWC0", "DefaultFormat"]
if data_format not in supported_format:
raise RuntimeError("{} format is not supported by batch norm ad now.".format(data_format))
beta = akg.tvm.placeholder(gamma.shape, gamma.dtype, name="beta")
outputs = FusedBatchNorm(data, gamma, beta, mean, var, eps=eps,
is_training=True, data_format=data_format,
axis=axis, single_sum=True)
output = outputs[0]
AD_attrs = {"keep_dims": 1, "tensor_optimize": 1, "export_DOT": 1, "separate_output": 1}
grads = list(akg.differentiate(output, [data, gamma, beta], head, AD_attrs, [outputs[3], mean, outputs[4], var]))
auto_diff_outs = [grads[0], grads[1], grads[2]]
attrs = get_attrs()
dim_info, _ = set_dim_func(data)
if dim_info != "":
attrs["dim"] = dim_info
attrs["custom_tiling"] = batch_norm_tiling_strategy(auto_diff_outs, data_format)
return auto_diff_outs, attrs | def BatchNormAd(head, data, mean, var, gamma, data_format="DefaultFormat", axis=1, eps=1e-3, target=utils.CCE):
"""
Compute gradient for batch normalization operator using automatic differentiation.
Args:
head (tvm.tensor.Tensor): Input tensor.
data (tvm.tensor.Tensor): Input tensor.
mean (tvm.tensor.Tensor): Input tensor.
var (tvm.tensor.Tensor): Input tensor.
gamma (tvm.tensor.Tensor): Input tensor.
data_format (str): Data format of input tensors.
axis (int): specify the channel axis when data_format is "DefaultFormat".
eps (float): small float added to variance to avoid dividing by zero.
Returns:
tvm.tensor.Tensor of same shape and type as head.
Supported Platforms:
'Ascend'
"""
supported_format = ["NCHW", "NHWC", "NC1HWC0", "DefaultFormat"]
if data_format not in supported_format:
raise RuntimeError("{} format is not supported by batch norm ad now.".format(data_format))
beta = akg.tvm.placeholder(gamma.shape, gamma.dtype, name="beta")
outputs = FusedBatchNorm(data, gamma, beta, mean, var, eps=eps,
is_training=True, data_format=data_format,
axis=axis, single_sum=True)
output = outputs[0]
AD_attrs = {"keep_dims": 1, "tensor_optimize": 1, "export_DOT": 1, "separate_output": 1}
grads = list(akg.differentiate(output, [data, gamma, beta], head, AD_attrs, [outputs[3], mean, outputs[4], var]))
auto_diff_outs = [grads[0], grads[1], grads[2]]
attrs = get_attrs()
dim_info, _ = set_dim_func(data)
if dim_info != "":
attrs["dim"] = dim_info
attrs["custom_tiling"] = batch_norm_tiling_strategy(auto_diff_outs, data_format)
return auto_diff_outs, attrs |
Python | def ConvBn1(data, fmap_shape, filter_shape, pad, stride, dilation, use_bias=False, attrs=None, target=utils.CCE):
"""
Computes sums of 5-D convolutions and use convolution's fp32 result to compute first part of Fused_batch_norm.
Fused_batch_norm's first part:
\f[
m = N \times H \times W \\
\mu_{tmp} = \sum_{n, h, w}{\frac{x}{m}} \\
\sigma^2_{tmp} = \sum_{n, h, w}{\frac{x^2}{m}}
\f]
Args:
data (list[tvm.tensor.Tensor]): the size is 3 if use_bias else the size is 2;
data[0] Tensor of type float16, shape 5D (fN, fC // C0, C0, fH, fW)
data[1] Tensor of type float16, shape 4D (wC // C0 * wH * wW, wN // C0, C0, C0)
data[2] Tensor of type float16, shape 5D (1, wN // C0, 1, 1, 16)
fmap_shape (list[int]): [fN, fC, fH, fW]
filter_shape (list[int]): [wN, wC, wH, wW]
pad (list[int]): [pad_top, pad_bottom, pad_left, pad_right]
stride (list[int]): [stride_h, stride_w]
dilation (list[int]): [dilation_h, dilation_w]
use_bias (bool): bool var.
attrs (dict): dict with keys for example: conv_tile,bypass
Returns:
tvm.tensor.Tensor of same type as data, shape is 5D(oN, oC // C0, oH, oW, C0)
Supported Platforms:
'Ascend'
"""
if use_bias:
raise ValueError("do not support bias yet !!!")
block_size = 16
dim_info, conv_tile, bypass, _ = conv_set_dim_func(fmap_shape, filter_shape, pad, stride, dilation, use_bias,
block_size, attrs, conv_bn1_set_dim_map)
if attrs is None:
attrs = {"conv_tile": conv_tile, "bypass": bypass}
else:
attrs['conv_tile'] = conv_tile
attrs['bypass'] = bypass
conv_res_32 = conv_core(data, fmap_shape, filter_shape, pad, stride, dilation, use_bias, attrs)
conv_res_16 = Cast(conv_res_32, "float16", utils.CCE)
axes = [3, 2, 0]
conv_res_32_shape = [x.value for x in conv_res_32.shape]
num = reduce(lambda i, j: i * j, [conv_res_32_shape[i] for i in axes])
avg_num = round(float(1) / float(num), 12)
res_sum = akg.topi.sum(conv_res_32, axes, keepdims=True)
mean = akg.lang.ascend.vmuls(res_sum, avg_num)
res_square = akg.tvm.compute(conv_res_32.shape, lambda *i: conv_res_32[i] * conv_res_32[i], name="res_square")
square_sum = akg.topi.sum(res_square, axes, keepdims=True)
var_part = akg.lang.ascend.vmuls(square_sum, avg_num)
# need pragma_force_rmselfdep to enable multicore using atomic add
# because default pragma_rmselfdep=1 will disable multicore of reduce axes
attrs = {"dim": dim_info, "enable_bisect_optimize": 0,
"pragma_rmselfdep": 0, "pragma_force_rmselfdep": 1}
return conv_res_16, var_part, mean, attrs | def ConvBn1(data, fmap_shape, filter_shape, pad, stride, dilation, use_bias=False, attrs=None, target=utils.CCE):
"""
Computes sums of 5-D convolutions and use convolution's fp32 result to compute first part of Fused_batch_norm.
Fused_batch_norm's first part:
\f[
m = N \times H \times W \\
\mu_{tmp} = \sum_{n, h, w}{\frac{x}{m}} \\
\sigma^2_{tmp} = \sum_{n, h, w}{\frac{x^2}{m}}
\f]
Args:
data (list[tvm.tensor.Tensor]): the size is 3 if use_bias else the size is 2;
data[0] Tensor of type float16, shape 5D (fN, fC // C0, C0, fH, fW)
data[1] Tensor of type float16, shape 4D (wC // C0 * wH * wW, wN // C0, C0, C0)
data[2] Tensor of type float16, shape 5D (1, wN // C0, 1, 1, 16)
fmap_shape (list[int]): [fN, fC, fH, fW]
filter_shape (list[int]): [wN, wC, wH, wW]
pad (list[int]): [pad_top, pad_bottom, pad_left, pad_right]
stride (list[int]): [stride_h, stride_w]
dilation (list[int]): [dilation_h, dilation_w]
use_bias (bool): bool var.
attrs (dict): dict with keys for example: conv_tile,bypass
Returns:
tvm.tensor.Tensor of same type as data, shape is 5D(oN, oC // C0, oH, oW, C0)
Supported Platforms:
'Ascend'
"""
if use_bias:
raise ValueError("do not support bias yet !!!")
block_size = 16
dim_info, conv_tile, bypass, _ = conv_set_dim_func(fmap_shape, filter_shape, pad, stride, dilation, use_bias,
block_size, attrs, conv_bn1_set_dim_map)
if attrs is None:
attrs = {"conv_tile": conv_tile, "bypass": bypass}
else:
attrs['conv_tile'] = conv_tile
attrs['bypass'] = bypass
conv_res_32 = conv_core(data, fmap_shape, filter_shape, pad, stride, dilation, use_bias, attrs)
conv_res_16 = Cast(conv_res_32, "float16", utils.CCE)
axes = [3, 2, 0]
conv_res_32_shape = [x.value for x in conv_res_32.shape]
num = reduce(lambda i, j: i * j, [conv_res_32_shape[i] for i in axes])
avg_num = round(float(1) / float(num), 12)
res_sum = akg.topi.sum(conv_res_32, axes, keepdims=True)
mean = akg.lang.ascend.vmuls(res_sum, avg_num)
res_square = akg.tvm.compute(conv_res_32.shape, lambda *i: conv_res_32[i] * conv_res_32[i], name="res_square")
square_sum = akg.topi.sum(res_square, axes, keepdims=True)
var_part = akg.lang.ascend.vmuls(square_sum, avg_num)
# need pragma_force_rmselfdep to enable multicore using atomic add
# because default pragma_rmselfdep=1 will disable multicore of reduce axes
attrs = {"dim": dim_info, "enable_bisect_optimize": 0,
"pragma_rmselfdep": 0, "pragma_force_rmselfdep": 1}
return conv_res_16, var_part, mean, attrs |
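# Reference sketch of the batch-norm "first part" computed after the convolution
# above, on an NC1HWC0 tensor: per (C1, C0) channel, mean = sum(x)/m and
# var_part = sum(x*x)/m with m = N*H*W. A random array stands in for the conv
# output; presumably the full variance is formed later as var_part - mean**2.
import numpy as np

x = np.random.randn(2, 3, 8, 8, 16).astype(np.float32)    # (N, C1, H, W, C0)
m = x.shape[0] * x.shape[2] * x.shape[3]                   # N * H * W
mean = x.sum(axis=(0, 2, 3), keepdims=True) / m            # E[x]   per channel
var_part = (x * x).sum(axis=(0, 2, 3), keepdims=True) / m  # E[x^2] per channel
print(mean.shape, var_part.shape)                          # (1, 3, 1, 1, 16)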
Python | def expand_dims_ad(head, data, axis, target="cce"):
"""Compute gradient of expand_dims operator using automatic differentiate."""
output = ExpandDims(data, axis, target=target)
jacs = list(akg.differentiate(output, [data], head))
return jacs[0] | def expand_dims_ad(head, data, axis, target="cce"):
"""Compute gradient of expand_dims operator using automatic differentiate."""
output = ExpandDims(data, axis, target=target)
jacs = list(akg.differentiate(output, [data], head))
return jacs[0] |
Python | def log1p_run(shape, dtype, kernel_name, attrs):
"""run function for dsl function log1p."""
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(log1p.log1p, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
expect, inputs, output = gen_data(dtype, shape)
return mod, expect, (inputs, output)
return mod
mod = utils.op_build_test(log1p.log1p, [shape], [dtype], kernel_name=kernel_name, attrs=attrs)
expect, inputs, output = gen_data(dtype, shape)
output = utils.mod_launch(mod, (inputs, output), expect=expect)
return inputs, output, expect, compare_tensor(output, expect, rtol=5e-03, atol=5e-03, equal_nan=True) | def log1p_run(shape, dtype, kernel_name, attrs):
"""run function for dsl function log1p."""
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(log1p.log1p, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
expect, inputs, output = gen_data(dtype, shape)
return mod, expect, (inputs, output)
return mod
mod = utils.op_build_test(log1p.log1p, [shape], [dtype], kernel_name=kernel_name, attrs=attrs)
expect, inputs, output = gen_data(dtype, shape)
output = utils.mod_launch(mod, (inputs, output), expect=expect)
return inputs, output, expect, compare_tensor(output, expect, rtol=5e-03, atol=5e-03, equal_nan=True) |
Python | def gen_data(dtype, shape):
"""Generates input, output and expect data."""
inputs = random_gaussian(shape, miu=1, sigma=0.2).astype(dtype)
inputs = np.abs(inputs)
expect = np.log1p(inputs)
output = np.full(expect.shape, np.nan, dtype)
return expect, inputs, output | def gen_data(dtype, shape):
"""Generates input, output and expect data."""
inputs = random_gaussian(shape, miu=1, sigma=0.2).astype(dtype)
inputs = np.abs(inputs)
expect = np.log1p(inputs)
output = np.full(expect.shape, np.nan, dtype)
return expect, inputs, output |
Python | def _apply_proximal_adagrad_compute(var, accum, lr, l1, l2, grad):
"""compute the FOBOS algorithm with adagrad learning rate"""
dtype = var.dtype
if dtype == "float16":
# cast to float32 for higher accuracy
compute_type = "float32"
var, accum, lr, l1, l2, grad = [akg.topi.cast(t, compute_type) for t in [var, accum, lr, l1, l2, grad]]
shape = var.shape
accum_new = akg.tvm.compute(shape, lambda *indice: accum(*indice) + grad(*indice) * grad(*indice), name="accum_new")
accum_new_rsqrt = Rsqrt(accum_new, target="cce")
ada_lr = akg.topi.multiply(lr, accum_new_rsqrt)
var_new = apply_proximal_gradient_descent_impl(var, ada_lr, l1, l2, grad)
# cast to origin dtype
var_new, accum_new = [akg.topi.cast(t, dtype) if t.dtype != dtype else t for t in [var_new, accum_new]]
return var_new, accum_new | def _apply_proximal_adagrad_compute(var, accum, lr, l1, l2, grad):
"""compute the FOBOS algorithm with adagrad learning rate"""
dtype = var.dtype
if dtype == "float16":
# cast to float32 for higher accuracy
compute_type = "float32"
var, accum, lr, l1, l2, grad = [akg.topi.cast(t, compute_type) for t in [var, accum, lr, l1, l2, grad]]
shape = var.shape
accum_new = akg.tvm.compute(shape, lambda *indice: accum(*indice) + grad(*indice) * grad(*indice), name="accum_new")
accum_new_rsqrt = Rsqrt(accum_new, target="cce")
ada_lr = akg.topi.multiply(lr, accum_new_rsqrt)
var_new = apply_proximal_gradient_descent_impl(var, ada_lr, l1, l2, grad)
# cast to origin dtype
var_new, accum_new = [akg.topi.cast(t, dtype) if t.dtype != dtype else t for t in [var_new, accum_new]]
return var_new, accum_new |
Python | def apply_proximal_adagrad(var, accum, lr, l1, l2, grad, target=utils.CCE):
"""
The FOBOS optimization algorithm with Adagrad learning rate.
Note:
accum_new = accum + grad * grad
ada_lr = lr * rsqrt(accum_new)
prox_var = var - ada_lr * grad
if l1 > 0:
var_new = Sign(prox_var)/(1+ada_lr*l2) * max{|prox_var|-ada_lr*l1,0}
else:
var_new = prox_var/(1+ada_lr*l2)
Args:
var (tvm.tensor.Tensor): The tensor to be updated. Should be float16 or float32.
accum (tvm.tensor.Tensor): A tensor of same shape and type as var. Each entry in it must be
greater or equal to zero.
lr (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
l1 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
l2 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
grad (tvm.tensor.Tensor): A tensor of same shape and type as var.
Returns:
tvm.tensor.Tensor, updated var.
tvm.tensor.Tensor, updated accum.
"""
# check_shape
utils.check_shape(var)
shape = get_shape(var)
for tensor in (accum, grad):
utils.elemwise_shape_check(shape, tensor.shape)
scalar_shape = (1,)
for scalar in (lr, l1, l2):
utils.elemwise_shape_check(scalar.shape, scalar_shape)
# check dtype
dtype = var.dtype
utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.FLOAT16, utils.DtypeForDavinci.FLOAT32])
for tensor in (var, accum, lr, l1, l2, grad):
utils.elemwise_dtype_check(tensor.dtype, dtype)
var_new, accum_new = _apply_proximal_adagrad_compute(var, accum, lr, l1, l2, grad)
(var_new, accum_new), binds_info = TensorUtils.inplace_set_tensors([var, accum], [var_new, accum_new])
attrs = {utils.BINDS: binds_info}
return var_new, accum_new, attrs | def apply_proximal_adagrad(var, accum, lr, l1, l2, grad, target=utils.CCE):
"""
The FOBOS optimization algorithm with Adagrad learning rate.
Note:
accum_new = accum + grad * grad
ada_lr = lr * rsqrt(accum_new)
prox_var = var - ada_lr * grad
if l1 > 0:
var_new = Sign(prox_var)/(1+ada_lr*l2) * max{|prox_var|-ada_lr*l1,0}
else:
var_new = prox_var/(1+ada_lr*l2)
Args:
var (tvm.tensor.Tensor): The tensor to be updated. Should be float16 or float32.
accum (tvm.tensor.Tensor): A tensor of same shape and type as var. Each entry in it must be
greater or equal to zero.
lr (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
l1 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
l2 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
grad (tvm.tensor.Tensor): A tensor of same shape and type as var.
Returns:
tvm.tensor.Tensor, updated var.
tvm.tensor.Tensor, updated accum.
"""
# check_shape
utils.check_shape(var)
shape = get_shape(var)
for tensor in (accum, grad):
utils.elemwise_shape_check(shape, tensor.shape)
scalar_shape = (1,)
for scalar in (lr, l1, l2):
utils.elemwise_shape_check(scalar.shape, scalar_shape)
# check dtype
dtype = var.dtype
utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.FLOAT16, utils.DtypeForDavinci.FLOAT32])
for tensor in (var, accum, lr, l1, l2, grad):
utils.elemwise_dtype_check(tensor.dtype, dtype)
var_new, accum_new = _apply_proximal_adagrad_compute(var, accum, lr, l1, l2, grad)
(var_new, accum_new), binds_info = TensorUtils.inplace_set_tensors([var, accum], [var_new, accum_new])
attrs = {utils.BINDS: binds_info}
return var_new, accum_new, attrs |
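# Reference sketch of the update spelled out in the apply_proximal_adagrad
# docstring above, written with plain NumPy arrays and Python scalars for clarity.
import numpy as np

def proximal_adagrad_reference(var, accum, lr, l1, l2, grad):
    accum_new = accum + grad * grad
    ada_lr = lr / np.sqrt(accum_new)                 # lr * rsqrt(accum_new)
    prox_var = var - ada_lr * grad
    if l1 > 0:
        var_new = (np.sign(prox_var) / (1.0 + ada_lr * l2)
                   * np.maximum(np.abs(prox_var) - ada_lr * l1, 0.0))
    else:
        var_new = prox_var / (1.0 + ada_lr * l2)
    return var_new, accum_new

var = np.array([0.5, -0.3], dtype=np.float32)
accum = np.array([0.1, 0.1], dtype=np.float32)
grad = np.array([0.2, -0.1], dtype=np.float32)
print(proximal_adagrad_reference(var, accum, 0.01, 0.001, 0.002, grad))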
Python | def script(pyfunc=None, intrinsics=None, capture=None):
"""Decorate a python function function as hybrid script.
The hybrid function support emulation mode and parsing to
the internal language IR.
Parameters
----------
intrinsics : An IntrinDef or a list of IntrinDef's
The definitions of intrinsics.
Returns
-------
hybrid_func : function
A decorated hybrid script function.
"""
if capture is None:
capture = {}
# Arguments validation.
if pyfunc is not None:
_internal_assert_arg_type(pyfunc, 'pyfunc', [_AssertCallable])
if intrinsics is not None:
_internal_assert_arg_type(intrinsics, 'intrinsics', [list, IntrinDef])
if isinstance(intrinsics, list):
for i, intrin_def in enumerate(intrinsics):
_internal_assert_arg_type(intrin_def,
'intrinsics[{}]'.format(i),
[IntrinDef])
closure_vars = dict(capture)
if pyfunc:
try:
closure_vars.update(inspect.getclosurevars(pyfunc).nonlocals)
closure_vars.update(inspect.getclosurevars(pyfunc).globals)
except AttributeError: # ignore error for python2
pass
def _script(pyfunc): # pylint: disable=missing-docstring
@functools.wraps(pyfunc)
def wrapped_func(func, *args, **kwargs): # pylint: disable=missing-docstring
from tvm.hybrid.util import _is_tvm_arg_types
if _is_tvm_arg_types(args):
_patch_intrins_to_calls(intrinsics=intrinsics)
_patch_intrins_to_runtime(intrinsics=intrinsics)
if capture.get("source_str"):
src = capture["source_str"]
else:
src = _pruned_source(func)
op = source_to_op(src, args, func.__globals__, closure_vars)
_unpatch_intrins_from_runtime(intrinsics=intrinsics)
_unpatch_intrins_from_calls(intrinsics=intrinsics)
return op
from .runtime import _enter_hybrid_runtime, _restore_runtime
_patch_intrins_to_runtime(intrinsics=intrinsics)
intersect = _enter_hybrid_runtime(func)
value = func(*args, **kwargs)
_restore_runtime(func, intersect)
_unpatch_intrins_from_runtime(intrinsics=intrinsics)
return value
return decorate(pyfunc, wrapped_func)
if pyfunc:
return _script(pyfunc)
return _script | def script(pyfunc=None, intrinsics=None, capture=None):
"""Decorate a python function function as hybrid script.
The hybrid function support emulation mode and parsing to
the internal language IR.
Parameters
----------
intrinsics : An IntrinDef or a list of IntrinDef's
The definitions of intrinsics.
Returns
-------
hybrid_func : function
A decorated hybrid script function.
"""
if capture is None:
capture = {}
# Arguments validation.
if pyfunc is not None:
_internal_assert_arg_type(pyfunc, 'pyfunc', [_AssertCallable])
if intrinsics is not None:
_internal_assert_arg_type(intrinsics, 'intrinsics', [list, IntrinDef])
if isinstance(intrinsics, list):
for i, intrin_def in enumerate(intrinsics):
_internal_assert_arg_type(intrin_def,
'intrinsics[{}]'.format(i),
[IntrinDef])
closure_vars = dict(capture)
if pyfunc:
try:
closure_vars.update(inspect.getclosurevars(pyfunc).nonlocals)
closure_vars.update(inspect.getclosurevars(pyfunc).globals)
except AttributeError: # ignore error for python2
pass
def _script(pyfunc): # pylint: disable=missing-docstring
@functools.wraps(pyfunc)
def wrapped_func(func, *args, **kwargs): # pylint: disable=missing-docstring
from tvm.hybrid.util import _is_tvm_arg_types
if _is_tvm_arg_types(args):
_patch_intrins_to_calls(intrinsics=intrinsics)
_patch_intrins_to_runtime(intrinsics=intrinsics)
if capture.get("source_str"):
src = capture["source_str"]
else:
src = _pruned_source(func)
op = source_to_op(src, args, func.__globals__, closure_vars)
_unpatch_intrins_from_runtime(intrinsics=intrinsics)
_unpatch_intrins_from_calls(intrinsics=intrinsics)
return op
from .runtime import _enter_hybrid_runtime, _restore_runtime
_patch_intrins_to_runtime(intrinsics=intrinsics)
intersect = _enter_hybrid_runtime(func)
value = func(*args, **kwargs)
_restore_runtime(func, intersect)
_unpatch_intrins_from_runtime(intrinsics=intrinsics)
return value
return decorate(pyfunc, wrapped_func)
if pyfunc:
return _script(pyfunc)
return _script |
Python | def Minimum(input1, input2, target=utils.CCE):
"""
Return the min value of two tensors element-wise.
Note:
minimum supports broadcasting.
Args:
input1: Tensor.
input2: Tensor. Has the same type as input1.
Returns:
Tensor, has the same type as inputs.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
utils.ops_dtype_check([input1.dtype, input2.dtype], utils.DtypeForDavinci.ALL_TYPES)
utils.elemwise_dtype_check(input1.dtype, input2.dtype)
dtype = input1.dtype
shape1 = [x.value for x in input1.shape]
shape2 = [x.value for x in input2.shape]
utils.check_shape(shape1)
utils.check_shape(shape2)
utils.auto_broadcast_check(shape1, shape2)
need_cast = True if target == utils.CCE and dtype in ["int8", "uint8"] else False
if need_cast:
input1 = Cast(input1, "float16", target)
input2 = Cast(input2, "float16", target)
res = akg.topi.minimum(input1, input2)
if need_cast:
res = Cast(res, dtype, target)
return res | def Minimum(input1, input2, target=utils.CCE):
"""
Return the min value of two tensors element-wise.
Note:
minimum supports broadcasting.
Args:
input1: Tensor.
input2: Tensor. Has the same type as input1.
Returns:
Tensor, has the same type as inputs.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
utils.ops_dtype_check([input1.dtype, input2.dtype], utils.DtypeForDavinci.ALL_TYPES)
utils.elemwise_dtype_check(input1.dtype, input2.dtype)
dtype = input1.dtype
shape1 = [x.value for x in input1.shape]
shape2 = [x.value for x in input2.shape]
utils.check_shape(shape1)
utils.check_shape(shape2)
utils.auto_broadcast_check(shape1, shape2)
need_cast = True if target == utils.CCE and dtype in ["int8", "uint8"] else False
if need_cast:
input1 = Cast(input1, "float16", target)
input2 = Cast(input2, "float16", target)
res = akg.topi.minimum(input1, input2)
if need_cast:
res = Cast(res, dtype, target)
return res |
Python | def softmax_grad_run(shape, dtype, axis, kernel_name, attrs=None):
"""run function for dsl function softmax_grad."""
if attrs is None:
attrs = {}
input_shapes = [shape, shape]
input_types = [dtype, dtype]
op_attrs = [axis]
attrs["pragma_disable_whole_component"] = False
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(softmax_grad.softmax_grad, input_shapes, input_types, op_attrs,
kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
dy, expect, output, x = gen_data(axis, dtype, shape)
return mod, expect, (x, dy, output)
return mod
dy, expect, output, x = gen_data(axis, dtype, shape)
mod = utils.op_build_test(softmax_grad.softmax_grad, input_shapes, input_types, op_attrs,
kernel_name=kernel_name, attrs=attrs)
output = utils.mod_launch(mod, (x, dy, output), expect=expect)
return (x, dy), output, expect, compare_tensor(output, expect, rtol=5e-2, equal_nan=True) | def softmax_grad_run(shape, dtype, axis, kernel_name, attrs=None):
"""run function for dsl function softmax_grad."""
if attrs is None:
attrs = {}
input_shapes = [shape, shape]
input_types = [dtype, dtype]
op_attrs = [axis]
attrs["pragma_disable_whole_component"] = False
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(softmax_grad.softmax_grad, input_shapes, input_types, op_attrs,
kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
dy, expect, output, x = gen_data(axis, dtype, shape)
return mod, expect, (x, dy, output)
return mod
dy, expect, output, x = gen_data(axis, dtype, shape)
mod = utils.op_build_test(softmax_grad.softmax_grad, input_shapes, input_types, op_attrs,
kernel_name=kernel_name, attrs=attrs)
output = utils.mod_launch(mod, (x, dy, output), expect=expect)
return (x, dy), output, expect, compare_tensor(output, expect, rtol=5e-2, equal_nan=True) |
Python | def gen_data(axis, dtype, shape):
"""Generates input, output and expect data."""
x = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
dy = random_gaussian(shape, miu=0, sigma=0.2).astype(dtype)
x_sub = x - np.max(x, axis=axis, keepdims=True)
x_sub_exp = np.exp(x_sub)
y = x_sub_exp / np.sum(x_sub_exp, axis=axis, keepdims=True)
y_grad = y * (1.0 - y)
expect = dy * y_grad
output = np.full(shape, -5.0, dtype)
return dy, expect, output, x | def gen_data(axis, dtype, shape):
"""Generates input, output and expect data."""
x = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
dy = random_gaussian(shape, miu=0, sigma=0.2).astype(dtype)
x_sub = x - np.max(x, axis=axis, keepdims=True)
x_sub_exp = np.exp(x_sub)
y = x_sub_exp / np.sum(x_sub_exp, axis=axis, keepdims=True)
y_grad = y * (1.0 - y)
expect = dy * y_grad
output = np.full(shape, -5.0, dtype)
return dy, expect, output, x |
Python | def apply_adagrad_da(var, grad_accum, grad_squared_accum, grad, lr, l1, l2, global_step, target=utils.CCE):
"""
Update var according to the Adagrad Dual Averaging algorithm.
grad_accum += grad
grad_squared_accum += grad * grad
tmp_val = Sign(grad_accum) * max(|grad_accum|-l1*global_step, 0) if l1 > 0 else grad_accum
x_value = -1 * lr * tmp_val
y_value = l2 * global_step * lr + sqrt(grad_squared_accum)
var = x_value / y_value
Args:
var (tvm.tensor.Tensor): Input var to be updated of type float16, float32.
grad_accum (tvm.tensor.Tensor): Accumulation of the gradients of same shape and type as var.
grad_squared_accum (tvm.tensor.Tensor): Accumulation of the squared gradients of same shape and type as var.
grad (tvm.tensor.Tensor): Input grad of same shape and type as var.
lr (tvm.tensor.Tensor): Learning rate, a scalar tensor of same type as var.
l1 (tvm.tensor.Tensor): L1 regularization, a scalar tensor of same type as var.
l2 (tvm.tensor.Tensor): L2 regularization, a scalar tensor of same type as var.
global_step (tvm.tensor.Tensor): Training step number, a scalar tensor of type int32.
Returns:
tvm.tensor.Tensor, the updated var.
tvm.tensor.Tensor, the updated grad_accum.
tvm.tensor.Tensor, the updated grad_squared_accum.
"""
_check_inputs(var, grad_accum, grad_squared_accum, grad, lr, l1, l2, global_step)
out_var, out_ga, out_gsa = _apply_adagrad_da_compute(
var, grad_accum, grad_squared_accum, grad, lr, l1, l2, global_step)
# reuse var, grad_accum and grad_squared_accum
out_var, binds_info = TensorUtils.inplace_set(var, out_var, "var_buf")
out_ga, binds_info2 = TensorUtils.inplace_set(grad_accum, out_ga, "grad_accum_buf")
out_gsa, binds_info3 = TensorUtils.inplace_set(grad_squared_accum, out_gsa, "grad_squared_accum_buf")
binds_info.update(binds_info2)
binds_info.update(binds_info3)
attrs = {utils.BINDS: binds_info}
return out_var, out_ga, out_gsa, attrs | def apply_adagrad_da(var, grad_accum, grad_squared_accum, grad, lr, l1, l2, global_step, target=utils.CCE):
"""
Update var according to the Adagrad Dual Averaging algorithm.
grad_accum += grad
grad_squared_accum += grad * grad
tmp_val = Sign(grad_accum) * max(|grad_accum|-l1*global_step, 0) if l1 > 0 else grad_accum
x_value = -1 * lr * tmp_val
y_value = l2 * global_step * lr + sqrt(grad_squared_accum)
var = x_value / y_value
Args:
var (tvm.tensor.Tensor): Input var to be updated of type float16, float32.
grad_accum (tvm.tensor.Tensor): Accumulation of the gradients of same shape and type as var.
grad_squared_accum (tvm.tensor.Tensor): Accumulation of the squared gradients of same shape and type as var.
grad (tvm.tensor.Tensor): Input grad of same shape and type as var.
lr (tvm.tensor.Tensor): Learning rate, a scalar tensor of same type as var.
l1 (tvm.tensor.Tensor): L1 regularization, a scalar tensor of same type as var.
l2 (tvm.tensor.Tensor): L2 regularization, a scalar tensor of same type as var.
global_step (tvm.tensor.Tensor): Training step number, a scalar tensor of type int32.
Returns:
tvm.tensor.Tensor, the updated var.
tvm.tensor.Tensor, the updated grad_accum.
tvm.tensor.Tensor, the updated grad_squared_accum.
"""
_check_inputs(var, grad_accum, grad_squared_accum, grad, lr, l1, l2, global_step)
out_var, out_ga, out_gsa = _apply_adagrad_da_compute(
var, grad_accum, grad_squared_accum, grad, lr, l1, l2, global_step)
# reuse var, grad_accum and grad_squared_accum
out_var, binds_info = TensorUtils.inplace_set(var, out_var, "var_buf")
out_ga, binds_info2 = TensorUtils.inplace_set(grad_accum, out_ga, "grad_accum_buf")
out_gsa, binds_info3 = TensorUtils.inplace_set(grad_squared_accum, out_gsa, "grad_squared_accum_buf")
binds_info.update(binds_info2)
binds_info.update(binds_info3)
attrs = {utils.BINDS: binds_info}
return out_var, out_ga, out_gsa, attrs |
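# Reference sketch of the Adagrad Dual Averaging update described in the
# apply_adagrad_da docstring above, using plain NumPy.
import numpy as np

def adagrad_da_reference(var, grad_accum, grad_squared_accum, grad,
                         lr, l1, l2, global_step):
    grad_accum = grad_accum + grad
    grad_squared_accum = grad_squared_accum + grad * grad
    if l1 > 0:
        tmp_val = np.sign(grad_accum) * np.maximum(
            np.abs(grad_accum) - l1 * global_step, 0.0)
    else:
        tmp_val = grad_accum
    x_value = -lr * tmp_val
    y_value = l2 * global_step * lr + np.sqrt(grad_squared_accum)
    var = x_value / y_value
    return var, grad_accum, grad_squared_accum

out = adagrad_da_reference(np.zeros(2, np.float32), np.zeros(2, np.float32),
                           np.full(2, 0.1, np.float32),
                           np.array([0.2, -0.3], np.float32),
                           lr=0.01, l1=0.001, l2=0.002, global_step=1)
print(out)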
Python | def blas_axby_ad(head, alpha, beta, target="cce"):
"""Compute gradient of blas_axby operator using automatic differentiate."""
x = akg.tvm.placeholder(head.shape, head.dtype, "inputx")
y = akg.tvm.placeholder(head.shape, head.dtype, "inputy")
op = blas_axby.blas_axby(x, y, alpha, beta)
jacs = list(akg.differentiate(op, [x, y], head))
return jacs[0], jacs[1] | def blas_axby_ad(head, alpha, beta, target="cce"):
"""Compute gradient of blas_axby operator using automatic differentiate."""
x = akg.tvm.placeholder(head.shape, head.dtype, "inputx")
y = akg.tvm.placeholder(head.shape, head.dtype, "inputy")
op = blas_axby.blas_axby(x, y, alpha, beta)
jacs = list(akg.differentiate(op, [x, y], head))
return jacs[0], jacs[1] |
Python | def matrix_set_diag(input_matrix, input_diagonal, input_help):
"""
Return a batched matrix tensor with new batched diagonal values.
Args:
input_matrix (tvm.tensor.Tensor): Tensor of float32, float16, int32, int8, uint8. The last two dimensions
can be unequal.
input_diagonal (tvm.tensor.Tensor): Tensor of float32, float16, int32, int8, uint8. The last dimension must equal
min(input_matrix[-1], input_matrix[-2]).
input_help (tvm.tensor.Tensor): Tensor of float32, float16, int32, int8, uint8, with diagonal elements of 1
and 0 elsewhere.
Returns:
tvm.tensor.Tensor, has the same type and shape as input_matrix.
"""
shape_input = get_shape(input_matrix)
shape_diag = get_shape(input_diagonal)
shape_help = get_shape(input_help)
dtype = input_matrix.dtype
utils.check_shape(shape_input)
utils.check_shape(shape_diag)
utils.check_shape(shape_help)
# Check help_matrix.
if (len(shape_input) < 2) or (len(shape_help) < 2):
raise RuntimeError("Only the rank of input tensors >= 2 are supported!")
utils.elemwise_shape_check(shape_input, shape_help)
# Check support dtype.
utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT8,
utils.DtypeForDavinci.INT32, utils.DtypeForDavinci.UINT8])
# Adjust diag's shape according to input shape.
# Extend the shape_diag dimension for broadcast.
# if input_shape is [2,4,7,9] and shape_diag is [2,4,7] then new_shape is [2,4,7,1]
# if input_shape is [2,4,9,7] and shape_diag is [2,4,7], then new_shape is [2,4,1,7]
if shape_input[-2] <= shape_input[-1]:
shape_b_newshape = list(shape_diag) + [1]
# The penultimate dimension of the shape_diag is extended for broadcast.
else:
shape_b_newshape = list(shape_diag)
shape_b_newshape.insert(-1, 1)
input_diagonal = topi.reshape(input_diagonal, shape_b_newshape)
res = matrix_set_diag_compute(input_matrix, input_diagonal, input_help)
return res | def matrix_set_diag(input_matrix, input_diagonal, input_help):
"""
Return a batched matrix tensor with new batched diagonal values.
Args:
input_matrix (tvm.tensor.Tensor): Tensor of float32, float16, int32, int8, uint8. The last two dimensions
can be unequal.
input_diagonal (tvm.tensor.Tensor): Tensor of float32, float16, int32, int8, uint8. The last dimension must equal
min(input_matrix[-1], input_matrix[-2]).
input_help (tvm.tensor.Tensor): Tensor of float32, float16, int32, int8, uint8, with diagonal elements of 1
and 0 elsewhere.
Returns:
tvm.tensor.Tensor, has the same type and shape as input_matrix.
"""
shape_input = get_shape(input_matrix)
shape_diag = get_shape(input_diagonal)
shape_help = get_shape(input_help)
dtype = input_matrix.dtype
utils.check_shape(shape_input)
utils.check_shape(shape_diag)
utils.check_shape(shape_help)
# Check help_matrix.
if (len(shape_input) < 2) or (len(shape_help) < 2):
raise RuntimeError("Only the rank of input tensors >= 2 are supported!")
utils.elemwise_shape_check(shape_input, shape_help)
# Check support dtype.
utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT8,
utils.DtypeForDavinci.INT32, utils.DtypeForDavinci.UINT8])
# Adjust diag's shape according to input shape.
# Extend the shape_diag dimension for broadcast.
# if input_shape is [2,4,7,9] and shape_diag is [2,4,7] then new_shape is [2,4,7,1]
# if input_shape is [2,4,9,7] and shape_diag is [2,4,7], then new_shape is [2,4,1,7]
if shape_input[-2] <= shape_input[-1]:
shape_b_newshape = list(shape_diag) + [1]
# The penultimate dimension of the shape_diag is extended for broadcast.
else:
shape_b_newshape = list(shape_diag)
shape_b_newshape.insert(-1, 1)
input_diagonal = topi.reshape(input_diagonal, shape_b_newshape)
res = matrix_set_diag_compute(input_matrix, input_diagonal, input_help)
return res |
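# Reference sketch of matrix_set_diag above. The body of matrix_set_diag_compute is
# not shown here, so this assumes the usual mask-based formula suggested by the
# docstring: input_help is a 0/1 mask of the diagonal, the new diagonal is broadcast
# along the longer of the last two axes (the reshape logic in the comments above),
# and the result is matrix * (1 - help) + diag * help.
import numpy as np

def matrix_set_diag_reference(matrix, diagonal, help_mask):
    if matrix.shape[-2] <= matrix.shape[-1]:
        diag_b = diagonal[..., :, None]     # broadcast along the last axis
    else:
        diag_b = diagonal[..., None, :]     # broadcast along the second-to-last axis
    return matrix * (1 - help_mask) + diag_b * help_mask

m = np.arange(12, dtype=np.float32).reshape(3, 4)
help_mask = np.eye(3, 4, dtype=np.float32)
diag = np.array([9.0, 8.0, 7.0], dtype=np.float32)
print(matrix_set_diag_reference(m, diag, help_mask))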
Python | def ReluAd(head, a, target=utils.CCE):
"""
Compute gradient of relu operator using automatic differentiation.
Args:
head (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
a (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
Returns:
tvm.tensor.Tensor with the same shape as input.
Supported Platforms:
'Ascend'
"""
dim_info, _ = relu_ad_set_dim_func(head, a)
attrs = {DIM: dim_info}
b = Relu(a)
jacs = list(akg.differentiate(b, [a], head))
return jacs[0], attrs | def ReluAd(head, a, target=utils.CCE):
"""
Compute gradient of relu operator using automatic differentiation.
Args:
head (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
a (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
Returns:
tvm.tensor.Tensor with the same shape as input.
Supported Platforms:
'Ascend'
"""
dim_info, _ = relu_ad_set_dim_func(head, a)
attrs = {DIM: dim_info}
b = Relu(a)
jacs = list(akg.differentiate(b, [a], head))
return jacs[0], attrs |
Python | def ReLU6Grad(y_grad, x, target=utils.CUDA):
"""
Computes Gradients of Rectified Linear 6.
Args:
y_grad (tvm.tensor.Tensor): Tensor of type float16, float32, gradients backpropagated to the ReLU6 op.
x (tvm.tensor.Tensor): Tensor of type float16/float32, inputs that were passed to the ReLU6 op, or its outputs.
Returns:
tvm.tensor.Tensor, has same type and shape as x.
Supported Platforms:
'GPU'
"""
if target != utils.CUDA:
raise RuntimeError("the target %s is not supported!" % target)
shape = x.shape
dtype = x.dtype
zero = tvm.const(0, dtype)
six = tvm.const(6, dtype)
res0 = tvm.compute(shape, lambda *i: tvm.if_then_else(x(*i) >= zero, x(*i), zero))
res6 = tvm.compute(shape, lambda *i: tvm.if_then_else(x(*i) >= six, zero, res0(*i)))
res = tvm.compute(shape, lambda *i: tvm.if_then_else(res6(*i) == zero, zero, y_grad(*i)))
return res | def ReLU6Grad(y_grad, x, target=utils.CUDA):
"""
Computes Gradients of Rectified Linear 6.
Args:
y_grad (tvm.tensor.Tensor): Tensor of type float16, float32, gradients backpropagated to the ReLU6 op.
x (tvm.tensor.Tensor): Tensor of type float16/float32, inputs that were passed to the ReLU6 op, or its outputs.
Returns:
tvm.tensor.Tensor, has same type and shape as x.
Supported Platforms:
'GPU'
"""
if target != utils.CUDA:
raise RuntimeError("the target %s is not supported!" % target)
shape = x.shape
dtype = x.dtype
zero = tvm.const(0, dtype)
six = tvm.const(6, dtype)
res0 = tvm.compute(shape, lambda *i: tvm.if_then_else(x(*i) >= zero, x(*i), zero))
res6 = tvm.compute(shape, lambda *i: tvm.if_then_else(x(*i) >= six, zero, res0(*i)))
res = tvm.compute(shape, lambda *i: tvm.if_then_else(res6(*i) == zero, zero, y_grad(*i)))
return res |
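# Reference sketch of ReLU6Grad above: the gradient passes y_grad through only
# where 0 < x < 6 and is zero elsewhere, mirroring the three if_then_else steps.
import numpy as np

x = np.array([-1.0, 0.0, 2.5, 6.0, 7.5], dtype=np.float32)
y_grad = np.ones_like(x)
dx = np.where((x > 0) & (x < 6), y_grad, 0.0)
print(dx)   # [0. 0. 1. 0. 0.]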
Python | def maxpool_with_argmax_tiling_strategy(data, kernel, stride, pad):
"""Custom tiling for maxpool with argmax version."""
batch, c1, fm_h, fm_w, c0 = data.shape
_, [out_h, _] = \
cal_pad_shapes_by_strategy(get_shape(data), kernel, stride, pad)
strategy = list()
if data.ndim == 5 and c0.value == 16:
h_cut = out_h
if isinstance(fm_h, akg.tvm.expr.Var) or (fm_h.value >= 50 and fm_w.value >= 50):
h_cut = 3
dim_ind = 0
if isinstance(batch, akg.tvm.expr.Var) or batch.value > 1:
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind)
dim_ind = dim_ind + 1
if isinstance(c1, akg.tvm.expr.Var) or c1.value > 1:
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values=h_cut,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind)
strategy += ct_util.create_constraint_on_axis(values="H",
constraints=ct_util.TileConstraint.SET_AXIS_INFO,
axis=dim_ind)
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
axis=dim_ind + 1)
strategy += ct_util.create_constraint_on_axis(values=5,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind + 2)
strategy += ct_util.create_constraint_on_axis(values=16,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind + 3)
return strategy | def maxpool_with_argmax_tiling_strategy(data, kernel, stride, pad):
"""Custom tiling for maxpool with argmax version."""
batch, c1, fm_h, fm_w, c0 = data.shape
_, [out_h, _] = \
cal_pad_shapes_by_strategy(get_shape(data), kernel, stride, pad)
strategy = list()
if data.ndim == 5 and c0.value == 16:
h_cut = out_h
if isinstance(fm_h, akg.tvm.expr.Var) or (fm_h.value >= 50 and fm_w.value >= 50):
h_cut = 3
dim_ind = 0
if isinstance(batch, akg.tvm.expr.Var) or batch.value > 1:
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind)
dim_ind = dim_ind + 1
if isinstance(c1, akg.tvm.expr.Var) or c1.value > 1:
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values=h_cut,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind)
strategy += ct_util.create_constraint_on_axis(values="H",
constraints=ct_util.TileConstraint.SET_AXIS_INFO,
axis=dim_ind)
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
axis=dim_ind + 1)
strategy += ct_util.create_constraint_on_axis(values=5,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind + 2)
strategy += ct_util.create_constraint_on_axis(values=16,
constraints=ct_util.TileConstraint.FACTOR,
axis=dim_ind + 3)
return strategy |
Python | def maxpool_with_argmax_dynamic_tensor_strategy(data, im2col, mask):
"""Custom tiling for maxpool with argmax version."""
_, _, _, _, c0 = data.shape
strategy = list()
if data.ndim == 5 and c0.value == 16:
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=0)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=1)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=2)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=3)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=4)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=5)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=6)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=0)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=1)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=2)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=3)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=4)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=5)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=6)
return strategy | def maxpool_with_argmax_dynamic_tensor_strategy(data, im2col, mask):
"""Custom tiling for maxpool with argmax version."""
_, _, _, _, c0 = data.shape
strategy = list()
if data.ndim == 5 and c0.value == 16:
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=0)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=1)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=2)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=3)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=4)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=5)
strategy += ct_util.create_constraint_on_tensor(tensor=im2col,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=6)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=0)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=1)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=2)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values=1,
constraints=ct_util.TileConstraint.FACTOR,
tensor_pos=3)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=4)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=5)
strategy += ct_util.create_constraint_on_tensor(tensor=mask,
values="FULL",
constraints=ct_util.TileConstraint.MAX,
tensor_pos=6)
return strategy |
Python | def maxpool_with_argmax_custom_tiling_strategy(data):
"""Custom tiling for maxpool with argmax version."""
batch, c1, _, _, c0 = data.shape
strategy = list()
if data.ndim == 5 and c0.value == 16:
band = 1
dim_ind = 0
if isinstance(batch, akg.tvm.expr.Var) or batch.value > 1:
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
if isinstance(c1, akg.tvm.expr.Var) or c1.value > 1:
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
band = 0
dim_ind = 0
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
return strategy | def maxpool_with_argmax_custom_tiling_strategy(data):
"""Custom tiling for maxpool with argmax version."""
batch, c1, _, _, c0 = data.shape
strategy = list()
if data.ndim == 5 and c0.value == 16:
band = 1
dim_ind = 0
if isinstance(batch, akg.tvm.expr.Var) or batch.value > 1:
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
if isinstance(c1, akg.tvm.expr.Var) or c1.value > 1:
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
band = 0
dim_ind = 0
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values=1,
constraints=ct_util.TileConstraint.FACTOR,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
dim_ind = dim_ind + 1
strategy += ct_util.create_constraint_on_axis(values="FULL",
constraints=ct_util.TileConstraint.MAX,
band=band,
axis=dim_ind)
return strategy |
Python | def MaxpoolWithArgmaxDynamic(data, kernel, stride, strategy):
"""
Performs max pooling on the input data.
Note:
Only support 5D format(NC1HWC0), and pooling will work on H and W.
Args:
data (tvm.tensor.Tensor): Tensor of type float16.
kernel (Union[list, tuple]): two int numbers for pooling window's size.
stride (Union[list, tuple]): two int numbers for window's stride.
strategy (Union[str, list, tuple]): padding, should be 'VALID','SAME' or
instance of list(four int numbers, as 'CONSTANTS' strategy).
The supported padding strategies are the same as for avgpool.
Returns:
tvm.tensor.Tensor, the max pooling result.
"""
attrs = get_dynamic_attrs()
dim_info = maxpool_with_argmax_set_dim_func(data, kernel, stride, strategy)[0]
for k, v in attr_map_v2.items():
attrs[k] = v
if dim_info != "":
attrs['dim'] = dim_info
# attrs["custom_tiling"] = maxpool_with_argmax_custom_tiling_strategy(data)
attrs["enable_feature_library"] = True
shape = get_shape(data)
dtype = data.dtype
utils.davinci_format_check(shape, "NC1HWC0", dim=5)
utils.ops_dtype_check(dtype, utils.DtypeForDavinci.FLOAT16)
utils.check_shape(kernel, 2, 'Kernel')
utils.check_shape(stride, 2, 'Stride')
pad_strategy_check(strategy)
kernel_h, kernel_w = kernel
in_n, in_c1, _, _, in_c0 = shape
[ph_h, ph_t, pw_h, pw_t], [out_h, out_w] = \
cal_pad_shapes_by_strategy(shape, kernel, stride, strategy)
pad = [ph_h, ph_t, pw_h, pw_t]
zero = akg.tvm.const(0.0, dtype=dtype)
min_value = akg.tvm.const(-65504.0 if dtype == 'float16'
else -340282346638528859811704183484516925440.0, dtype=dtype)
# fmap img2col l1 -> ub in zZ format by fractal
fmap_img2col_shape_ub = (in_n, in_c1, kernel_h, kernel_w, out_h, out_w, in_c0)
fmap_img2col_ub = img2col(data, fmap_img2col_shape_ub, kernel_h, kernel_w,
pad, stride, min_value, tag='')
out_shape = (in_n, in_c1, out_h, out_w, in_c0)
reduce_axis_h = akg.tvm.reduce_axis((0, kernel_h), name="reduce_h")
reduce_axis_w = akg.tvm.reduce_axis((0, kernel_w), name="reduce_w")
output = akg.tvm.compute(out_shape,
lambda n, c1, oh, ow, c0:
akg.tvm.max(
fmap_img2col_ub[n, c1, reduce_axis_h,
reduce_axis_w, oh, ow, c0],
axis=[reduce_axis_h, reduce_axis_w]),
name="pooling_max")
zero = akg.tvm.const(0.0, dtype=dtype)
mask_first_max_shape = (in_n, in_c1, kernel_h, kernel_w, out_h, out_w, in_c0)
mask_first_max = akg.tvm.compute(mask_first_max_shape, lambda *indice: zero, name="mask_first_max")
attrs["custom_tiling"] = maxpool_with_argmax_dynamic_tensor_strategy(
data, fmap_img2col_ub, mask_first_max)
attrs["dynamic_shape"] = ds.set_dynamic_shape_limit_for_tensor(output, [64, 64], [2, 3])
    return output, mask_first_max, attrs

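# A minimal sketch of the output-size arithmetic behind a SAME/VALID pooling strategy.
# Assumption: cal_pad_shapes_by_strategy follows the usual TensorFlow-style rules; the
# helper below is only an illustration, not the actual implementation used above.
import math

def pooled_output_size(in_size, kernel, stride, strategy):
    """Return the pooled output length for one spatial dimension (sketch)."""
    if strategy == "VALID":
        return math.floor((in_size - kernel) / stride) + 1
    if strategy == "SAME":
        return math.ceil(in_size / stride)
    raise ValueError("unsupported strategy: %s" % strategy)

# Example: a 28x28 feature map with a 3x3 window and stride 2 gives
# pooled_output_size(28, 3, 2, "VALID") == 13 and pooled_output_size(28, 3, 2, "SAME") == 14.
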
def MaxPoolWithArgmax(data, kernel, stride, strategy, target=utils.CCE):
"""
    Performs max pooling on the input data.
    Note:
        Only supports the 5D format (NC1HWC0); pooling works on H and W.
    Args:
        data (tvm.tensor.Tensor): Tensor of type float16, float32.
        kernel (Union[list, tuple]): two int numbers for the pooling window's size.
        stride (Union[list, tuple]): two int numbers for the window's stride.
        strategy (Union[str, list, tuple]): padding, should be 'VALID', 'SAME' or
            a list of four int numbers (the 'CONSTANTS' strategy).
            Supported strategies are the same as for avgpool.
    Returns:
        tvm.tensor.Tensor, the max pooling result, together with a mask marking the first maximum in each window.
"""
attrs = get_attrs()
dim_info = maxpool_with_argmax_set_dim_func(data, kernel, stride, strategy)[0]
for k, v in attr_map_v2.items():
attrs[k] = v
if dim_info != "":
attrs['dim'] = dim_info
attrs["custom_tiling"] = maxpool_with_argmax_tiling_strategy(data, kernel, stride, strategy)
shape = get_shape(data)
dtype = data.dtype
utils.davinci_format_check(shape, "NC1HWC0", dim=5)
utils.ops_dtype_check(dtype, utils.DtypeForDavinci.FLOAT16)
utils.check_shape(kernel, 2, 'Kernel')
utils.check_shape(stride, 2, 'Stride')
pad_strategy_check(strategy)
kernel_h, kernel_w = kernel
in_n, in_c1, _, _, in_c0 = shape
[ph_h, ph_t, pw_h, pw_t], [out_h, out_w] = \
cal_pad_shapes_by_strategy(shape, kernel, stride, strategy)
pad = [ph_h, ph_t, pw_h, pw_t]
zero = akg.tvm.const(0.0, dtype=dtype)
one = akg.tvm.const(1.0, dtype=dtype)
min_value = akg.tvm.const(-65504.0 if dtype == 'float16'
else -340282346638528859811704183484516925440.0, dtype=dtype)
# fmap img2col l1 -> ub in zZ format by fractal
fmap_img2col_shape_ub = (in_n, in_c1, kernel_h, kernel_w, out_h, out_w, in_c0)
fmap_img2col_ub = img2col(data, fmap_img2col_shape_ub, kernel_h, kernel_w,
pad, stride, min_value, tag='')
out_shape = (in_n, in_c1, out_h, out_w, in_c0)
reduce_axis_h = akg.tvm.reduce_axis((0, kernel_h), name="reduce_h")
reduce_axis_w = akg.tvm.reduce_axis((0, kernel_w), name="reduce_w")
output = akg.tvm.compute(out_shape,
lambda n, c1, oh, ow, c0:
akg.tvm.max(
fmap_img2col_ub[n, c1, reduce_axis_h,
reduce_axis_w, oh, ow, c0],
axis=[reduce_axis_h, reduce_axis_w]),
name="pooling_max")
pooling_mask = akg.tvm.compute(fmap_img2col_shape_ub,
lambda n, c1, kh, kw, oh, ow, c0:
akg.tvm.if_then_else(
fmap_img2col_ub[n, c1, kh, kw, oh, ow, c0]
< output[n, c1, oh, ow, c0], zero, one),
name="pooling_mask")
mask_flag = akg.tvm.compute(
out_shape,
lambda n, c1, oh, ow, c0: pooling_mask[n, c1, 0, 0, oh, ow, c0],
name="mask_flag")
mask_init = akg.tvm.compute(
out_shape,
lambda n, c1, oh, ow, c0: pooling_mask[n, c1, 0, 0, oh, ow, c0],
name="mask_init")
# spec 2
@script(capture=locals())
def hybrid_first_max(mask_, flag_, flag2_, zero_, one_):
output_ = allocate((in_n, in_c1, kernel_h, kernel_w, out_h, out_w, in_c0), mask_.dtype, 'local')
for n_i in range(in_n):
for c1_i in range(in_c1):
for oh_i in range(out_h):
for ow_i in range(out_w):
for c0_i in range(in_c0):
output_[n_i, c1_i, 0, 0, oh_i, ow_i, c0_i] = flag2_[n_i, c1_i, oh_i, ow_i, c0_i]
for kh_i in range(kernel_h):
for kw_i in range(kernel_w):
for oh_i in range(out_h):
for ow_i in range(out_w):
for c0_i in range(in_c0):
output_[n_i, c1_i, kh_i, kw_i, oh_i, ow_i, c0_i] = \
mask_[n_i, c1_i, kh_i, kw_i, oh_i, ow_i, c0_i] -\
flag_[n_i, c1_i, oh_i, ow_i, c0_i]
output_[n_i, c1_i, kh_i, kw_i, oh_i, ow_i, c0_i] = \
max(output_[n_i, c1_i, kh_i, kw_i, oh_i, ow_i, c0_i], zero_)
flag_[n_i, c1_i, oh_i, ow_i, c0_i] =\
flag_[n_i, c1_i, oh_i, ow_i, c0_i] +\
output_[n_i, c1_i, kh_i, kw_i, oh_i, ow_i, c0_i]
return output_
mask_first_max = hybrid_first_max(pooling_mask, mask_flag, mask_init, zero, one)
    return output, mask_first_max, attrs

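# A plain numpy reference for the forward pooling and the "first max" mask above, written
# for a single 2-D plane with VALID padding only. It is a sketch for checking the semantics
# (the mask flags only the first occurrence of each window maximum), not the NC1HWC0 kernel.
import numpy as np

def maxpool_with_first_max_mask(plane, kernel, stride):
    kh, kw = kernel
    sh, sw = stride
    h, w = plane.shape
    oh, ow = (h - kh) // sh + 1, (w - kw) // sw + 1
    out = np.empty((oh, ow), plane.dtype)
    mask = np.zeros((kh, kw, oh, ow), plane.dtype)
    for i in range(oh):
        for j in range(ow):
            window = plane[i * sh:i * sh + kh, j * sw:j * sw + kw]
            out[i, j] = window.max()
            first = np.unravel_index(np.argmax(window), window.shape)
            mask[first[0], first[1], i, j] = 1.0   # only the first maximum is flagged
    return out, mask
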
def cross(x, y, target=utils.CCE):
"""
Compute cross product of x and y.
Note:
        The first dim of x and y must be 3; the product is calculated as (two dims for example)
.. math::
res = x \\times y = \\left[ \\begin{matrix}
l, & \\cdots \\\\ m, & \\cdots \\\\ n, & \\cdots
\\end{matrix} \\right] \\times \\left[ \\begin{matrix}
o, & \\cdots \\\\ p, & \\cdots \\\\ q, & \\cdots
\\end{matrix} \\right] = \\left[ \\begin{matrix}
mq-np, & \\cdots \\\\ no-lq, & \\cdots \\\\ lp-mo, & \\cdots \\\\
\\end{matrix} \\right]
Args:
x (tvm.tensor.Tensor): Input tensor, only support float16, float32,
int32, int8, uint8.
y (tvm.tensor.Tensor): Input tensor, must have the same shape and dtype
as x.
Returns:
A tvm.tensor.Tensor with the same shape and dtype as x.
"""
utils.elemwise_shape_check(get_shape(y), get_shape(x))
utils.elemwise_dtype_check(
y.dtype, x.dtype,
(utils.DtypeForDavinci.ALL_FLOAT) if product_is_mini() \
else (utils.DtypeForDavinci.FLOAT16,
utils.DtypeForDavinci.FLOAT32,
utils.DtypeForDavinci.INT32,
utils.DtypeForDavinci.INT8, utils.DtypeForDavinci.UINT8))
shape = get_shape(x)
if shape[0] != 3:
raise RuntimeError(
"The first axis of input must be 3, actual input is %d" % shape[0])
inp_dtype = x.dtype
need_type_convert = inp_dtype in ("int8", "uint8")
shape = get_shape(x)
shp = shape[1:]
if need_type_convert:
x = Cast(x, "float16", target=utils.CCE)
y = Cast(y, "float16", target=utils.CCE)
a0b1 = tvm.compute(shp, lambda *i: x(0, *i) * y(1, *i), name="a0b1")
a0b2 = tvm.compute(shp, lambda *i: x(0, *i) * y(2, *i), name="a0b2")
a1b0 = tvm.compute(shp, lambda *i: x(1, *i) * y(0, *i), name="a1b0")
a1b2 = tvm.compute(shp, lambda *i: x(1, *i) * y(2, *i), name="a1b2")
a2b0 = tvm.compute(shp, lambda *i: x(2, *i) * y(0, *i), name="a2b0")
a2b1 = tvm.compute(shp, lambda *i: x(2, *i) * y(1, *i), name="a2b1")
res0 = tvm.compute(shp, lambda *i: a1b2(*i) - a2b1(*i), name="res0")
res1 = tvm.compute(shp, lambda *i: a2b0(*i) - a0b2(*i), name="res1")
res2 = tvm.compute(shp, lambda *i: a0b1(*i) - a1b0(*i), name="res2")
res = tvm.compute(
shape,
lambda *i:
tvm.expr.Select(
i[0] == 0,
res0(*i[1:]),
tvm.expr.Select(i[0] == 1, res1(*i[1:]), res2(*i[1:]))),
name='res')
if need_type_convert:
res = Cast(res, inp_dtype, target=utils.CCE)
    return res

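# Quick numpy cross-check for the cross product above: with the length-3 axis first,
# np.cross(..., axis=0) computes the same thing. Sketch only; assumes float inputs.
import numpy as np

x_np = np.random.rand(3, 4).astype(np.float32)
y_np = np.random.rand(3, 4).astype(np.float32)
expected = np.cross(x_np, y_np, axis=0)   # shape (3, 4); rows are mq-np, no-lq, lp-mo
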
def gen_data(data_format, dtype, shape):
"""Generate data for testing the op"""
input = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
head_np = input
if data_format == "NC1HWC0":
channel_dims = [1, 4]
elif data_format == DEFAULT:
channel_dims = [1]
else:
channel_dims = [len(shape) - 1]
reduce_axis = [i for i in range(len(shape)) if i not in channel_dims]
if dtype == "float16":
expect = np_bisect_sum(input, axis=tuple(reduce_axis), keepdims=True)
else:
expect = np.sum(input, axis=tuple(reduce_axis), keepdims=True)
output = np.full(expect.shape, np.nan, dtype)
    return expect, head_np, input, output

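# Example of the axis bookkeeping above: for an NC1HWC0 tensor the channel axes are
# 1 (C1) and 4 (C0), so the reduction runs over N, H and W. Illustration only.
shape_example = (8, 4, 16, 16, 16)        # N, C1, H, W, C0
channel_dims_example = [1, 4]
reduce_axis_example = [i for i in range(len(shape_example)) if i not in channel_dims_example]
assert reduce_axis_example == [0, 2, 3]
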
def random_gaussian(size, miu=3, sigma=1):
""" Generate random array with absolution value obeys gaussian distribution """
if sigma <= 0:
sys.stderr.write("Error: Expect positive sigmal for gaussian distribution. but get %f\n" % sigma)
sys.exit(1)
rgn = np.random.RandomState(2019)
ret = rgn.normal(miu, sigma, size)
for x in np.nditer(ret, op_flags=['readwrite']):
if secretsGenerator.randint(0, 1):
continue
x[...] = x * -1
    return ret

# hybrid script: the @script decorator is required so that allocate() is available inside.
@script
def trace_extract_hybrid(input1):
"""
Extract matrix's diag elements.
Args:
input1:tvm.Tensor of type float32 with 3d shape [1, matrix_dim, matrix_dim].
Returns:
akg.tvm.Tensor of type float32 with 2d shape [1, matrix_dim].
"""
dim = input1.shape[1]
trace_tensor = allocate((1,dim), input1.dtype, 'local')
res1 = allocate(input1.shape, input1.dtype, 'local')
for i in range(dim):
for j in range(dim):
res1[0,i,j] = input1[0,i,j]
for j in range(dim):
trace_tensor[0,j] = res1[0,j,j]
    return trace_tensor

def trace_extract(input1, target="cce"):
"""
Extract matrix's diag elements.
Args:
input1:tvm.Tensor of type float32 with 3d shape [1, matrix_dim, matrix_dim].
Returns:
akg.tvm.Tensor of type float32 with 2d shape [1, matrix_dim].
"""
trace_tensor = trace_extract_hybrid(input1)
    return trace_tensor

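# The hybrid script above simply picks the diagonal of a [1, d, d] matrix; in numpy terms
# it is equivalent to the sketch below (illustration only).
import numpy as np

mat = np.arange(16, dtype=np.float32).reshape(1, 4, 4)
trace_ref = np.diagonal(mat, axis1=1, axis2=2)   # shape (1, 4): the diagonal elements
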
def confusion_matrix(actual, predict, num_class, target=utils.CCE):
"""
Computes the confusion matrix from predictions and labels.
The matrix columns represent the prediction labels and the rows represent the real labels.
The confusion matrix is always a 2-D array of shape `(num_class, num_class)`.
Both `predict` and `actual` must be 1-D arrays of the same shape in order for this function to work.
Args:
actual (tvm.tensor.Tensor): 1-D tensor of type int32.
predict (tvm.tensor.Tensor): 1-D tensor of type int32.
num_class (int): The number of valid labels for a given classification task.
Returns:
tvm.tensor.Tensor, with shape `(num_class, num_class)` representing the confusion matrix.
"""
utils.check_shape(actual, length=1, tensor_name="actual")
utils.check_shape(predict, length=1, tensor_name="predict")
utils.check_equal("length of actual", "length of predict", actual.shape[0].value, predict.shape[0].value)
utils.ops_dtype_check([actual.dtype, predict.dtype], utils.DtypeForDavinci.INT32)
N = num_class
K = actual.shape[0].value
k = akg.tvm.reduce_axis((0, K), "k")
# reduce over first axis
tmp = akg.tvm.compute((K, N, N),
lambda k, i, j: akg.tvm.expr.Select(akg.tvm.all(i == actual[k], j == predict[k]), 1, 0),
name="tmp")
output = akg.tvm.compute((N, N), lambda i, j: akg.tvm.sum(tmp[k][i][j], axis=k))
    return output

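# A direct numpy reference of the confusion matrix computed above: the row index is the
# actual label, the column index is the prediction. Sketch for verification only.
import numpy as np

def confusion_matrix_np(actual_np, predict_np, num_class):
    cm = np.zeros((num_class, num_class), dtype=np.int32)
    for a, p in zip(actual_np, predict_np):
        cm[a, p] += 1
    return cm
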
def apply_rms_prop_mixed_precision_run(shape, dtype, lr, momentum, rho, epsilon, attrs=None):
"""run function for dsl function apply_rms_prop_mixed_precision."""
if attrs is None:
attrs = {}
dtype = dtype.lower()
shapes = [shape, shape, shape, shape, (1,), (1,), (1,)]
types = [dtype, dtype, dtype, dtype, dtype, dtype, dtype]
op_attrs = [epsilon]
mod = utils.op_build_test(apply_rms_prop.apply_rms_prop_mixed_precision, shapes, types,
op_attrs=op_attrs, kernel_name="apply_rms_prop_mixed_precision", attrs=attrs)
inputs, expects, args = gen_data(shape, dtype, lr, momentum, rho, epsilon)
outputs = utils.mod_launch(mod, args, outputs=(0, -1, 1, 2), expect=expects)
# output type: fp32, fp16, fp32, fp32
precision = [get_rtol_atol("apply_rms_prop", e.dtype) for e in expects]
results = list(map(lambda x, y, p: compare_tensor(x, y, rtol=p[0], atol=p[1]), outputs, expects, precision))
    return inputs, outputs, expects, all(results)

def gen_data(shape, dtype, lr, momentum, rho, epsilon):
"""Generates input, output and expect data."""
var = random_gaussian(shape, miu=10, sigma=1.0).astype(dtype)
ms = np.abs(random_gaussian(shape, miu=4, sigma=0.1).astype(dtype))
mom = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
grad = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
lr = np.array([lr]).astype(dtype)
momentum = np.array([momentum]).astype(dtype)
rho = np.array([rho]).astype(dtype)
inputs = [var, ms, mom, grad, lr, momentum, rho]
# ms = rho * ms + (1-rho) * grad * grad
# mom = momentum * mom + lr * grad / sqrt(ms + epsilon)
# var = var - mom
one = np.array([1.0]).astype(dtype)
ms_1 = rho * ms
ms_2 = (one - rho) * grad * grad
ms_update = ms_1 + ms_2
mom_1 = momentum * mom
mom_2_1 = lr * grad
mom_2_2 = one / np.sqrt(ms_update + epsilon)
mom_3 = mom_2_1 * mom_2_2
mom_update = mom_1 + mom_3
var_update = var - mom_update
expects = (var_update, var_update.astype("float16"), ms_update, mom_update)
outputs = np.full(var_update.shape, np.nan, "float16")
args = [*inputs, outputs]
    return inputs, expects, args

def batch_to_space_nd(data, block_shape, crops, target=utils.CCE):
"""
The N-D version of BatchToSpace.
Rearrange batch data into spatial data blocks and then crop.
Args:
data (tvm.tensor.Tensor): Batch data of type float16.
block_shape (Union[tuple, list]): 1-D shape of length `L`.
crops (Union[tuple, list]): 2-D list of shape `[L][2]`, all values must be greater than or equal to zero.
the i-th block will be cropped in `[crops[i][0] : -crops[i][1]]`.
Returns:
tvm.tensor.Tensor, Spatial data with the same type as data.
"""
check_inputs(data, block_shape, crops)
input_shape = get_shape(data)
block_shape = list(block_shape)
M = len(block_shape)
batch = input_shape[0]
prod_of_block_shape = reduce(lambda x, y: x * y, block_shape)
# step 1
reshaped_shape = block_shape + [batch // prod_of_block_shape] + input_shape[1:]
reshaped = akg.topi.reshape(data, reshaped_shape)
# step 2
tran_axis = list()
tran_axis.append(M) # batch / prod(block_shape)
for i in range(M):
tran_axis.append(M + i + 1) # input_shape[1...M]
tran_axis.append(i) # block_shape[0...M-1]
tran_axis += list(range(len(tran_axis), len(reshaped_shape))) # input_shape[M+1...N-1]
permuted = akg.topi.transpose(reshaped, tran_axis)
# step 3
reshaped_permuted_shape = [batch // prod_of_block_shape]
reshaped_permuted_shape += [input_shape[i + 1] * block_shape[i] for i in range(M)]
reshaped_permuted_shape += input_shape[M + 1:]
reshaped_permuted = akg.topi.reshape(permuted, reshaped_permuted_shape)
# step 4
out_shape = [batch // prod_of_block_shape]
out_shape += [(reshaped_permuted_shape[i + 1] - crops[i][0] - crops[i][1]) for i in range(M)]
out_shape += input_shape[M + 1:]
output = akg.tvm.compute(out_shape,
lambda *i: reshaped_permuted(i[0], *[i[j + 1] + crops[j][0] for j in range(M)], *i[M + 1:]),
name="result")
    return output

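# A numpy reference of the four steps above (split batch, interleave, merge, crop),
# useful for checking the semantics on small inputs. This is a sketch that assumes the
# op mirrors the usual batch_to_space_nd behaviour, not the actual device kernel.
import numpy as np

def batch_to_space_nd_np(data, block_shape, crops):
    block_shape = list(block_shape)
    M = len(block_shape)
    batch = data.shape[0]
    prod = int(np.prod(block_shape))
    # step 1: split the batch into the block dimensions
    reshaped = data.reshape(block_shape + [batch // prod] + list(data.shape[1:]))
    # step 2: interleave each block dim with the spatial dim it expands
    perm = [M]
    for i in range(M):
        perm += [M + i + 1, i]
    perm += list(range(len(perm), reshaped.ndim))
    permuted = reshaped.transpose(perm)
    # step 3: merge each (spatial, block) pair
    new_spatial = [data.shape[i + 1] * block_shape[i] for i in range(M)]
    merged = permuted.reshape([batch // prod] + new_spatial + list(data.shape[M + 1:]))
    # step 4: crop each expanded spatial dimension
    slices = [slice(None)]
    for i in range(M):
        slices.append(slice(crops[i][0], merged.shape[i + 1] - crops[i][1]))
    slices += [slice(None)] * (merged.ndim - M - 1)
    return merged[tuple(slices)]

# e.g. data of shape (4, 1, 1, 1) with block_shape [2, 2] and zero crops -> shape (1, 2, 2, 1)
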
def GenData(shape, dtype):
""" Generate data for testing the op """
class_num = shape[1]
labels_int = np.random.randint(low=0, high=shape[1] - 1, size=shape[0])
labels = np.eye(class_num)[labels_int].astype(dtype)
logits = np.random.random(shape).astype(dtype)
logits = logits / 10 + 1e-05
loss_all = labels * np.log(logits) * -1
loss = np.sum(loss_all, axis=-1)
    return labels, logits, loss

def l1_loss_grad(pre_deriv, inputs, outputs, target="cce"):
"""
do backprop for L1 loss (MAE)
"""
inputs_dtype = inputs.dtype
target_dtype = outputs.dtype
pre_deriv_dtype = pre_deriv.dtype
# check inputs data types
check_list = ["float16", "float32"]
if not inputs_dtype.lower() in check_list:
raise RuntimeError("inputs only support %s while dtype is %s" % (
",".join(check_list), inputs_dtype))
if not target_dtype.lower() in check_list:
raise RuntimeError("outputs only support %s while dtype is %s" % (
",".join(check_list), target_dtype))
if not pre_deriv_dtype.lower() in check_list:
raise RuntimeError("prev Derivative only support %s while dtype is %s" % (
",".join(check_list), pre_deriv_dtype))
if not get_const_tuple(outputs.shape) == get_const_tuple(inputs.shape):
        raise RuntimeError(
            "Please ensure inputs and outputs have the same size.", outputs.shape, inputs.shape)
inputs_dtype_old = inputs_dtype
if product_is_mini() and inputs_dtype == 'float32':
inputs = akg.topi.cast(inputs, "float16")
outputs = akg.topi.cast(outputs, "float16")
inputs_dtype = "float16"
def grad_dsl(inputs, outputs, pre_deriv):
        # broadcast is done outside, because tvm needs a static shape check; it cannot be checked when the shape is not fixed
        # pre_deriv = akg.topi.broadcast_to(pre_deriv, inputs.shape)
coefficient = akg.tvm.const(-1.0, dtype=inputs_dtype)
res = akg.tvm.compute(inputs.shape,
lambda *i: akg.tvm.if_then_else(
inputs(*i) >= outputs(*i),
pre_deriv(*i), coefficient * pre_deriv(*i))
)
return res
cur_deriv = grad_dsl(inputs, outputs, pre_deriv)
if product_is_mini() and inputs_dtype_old == 'float32':
cur_deriv = akg.topi.cast(cur_deriv, inputs_dtype_old)
    return cur_deriv

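# numpy reference for the L1 loss gradient above: the derivative of |x - t| w.r.t. x is
# +1 where x >= t and -1 otherwise, scaled by the incoming gradient. Sketch only.
import numpy as np

def l1_loss_grad_np(pre_deriv_np, inputs_np, outputs_np):
    return np.where(inputs_np >= outputs_np, pre_deriv_np, -pre_deriv_np)
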
def apply_rms_prop_run(shape, dtype, lr, momentum, rho, epsilon, attrs=None):
"""run function for dsl function apply_rms_prop."""
if attrs is None:
attrs = {}
dtype = dtype.lower()
shapes = [shape, shape, shape, shape, (1,), (1,), (1,)]
types = [dtype, dtype, dtype, dtype, dtype, dtype, dtype]
op_attrs = [epsilon]
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(apply_rms_prop.apply_rms_prop, shapes, types,
op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
_, expects, args = gen_data(shape, dtype, lr, momentum, rho, epsilon)
return mod, expects, args
return mod
mod = utils.op_build_test(apply_rms_prop.apply_rms_prop, shapes, types,
op_attrs=op_attrs, kernel_name="apply_rms_prop", attrs=attrs)
inputs, expects, args = gen_data(shape, dtype, lr, momentum, rho, epsilon)
outputs = utils.mod_launch(mod, args, outputs=(0, 1, 2), expect=expects)
rtol, atol = get_rtol_atol("apply_rms_prop", dtype)
results = list(map(lambda x, y: compare_tensor(x, y, rtol=rtol, atol=atol), outputs, expects))
    return inputs, outputs, expects, all(results)

def gen_data(shape, dtype, lr, momentum, rho, epsilon):
"""Generates input, output and expect data."""
var = random_gaussian(shape, miu=10, sigma=1.0).astype(dtype)
ms = np.abs(random_gaussian(shape, miu=4, sigma=0.1).astype(dtype))
mom = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
grad = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
lr = np.array([lr]).astype(dtype)
momentum = np.array([momentum]).astype(dtype)
rho = np.array([rho]).astype(dtype)
inputs = [var, ms, mom, grad, lr, momentum, rho]
expects = apply_rms_prop_compute(var, ms, mom, grad, lr, momentum, rho, epsilon)
args = inputs
    return inputs, expects, args

def slice(data, begin, size):
"""
Extracts a slice from a tensor.
Args:
data (tvm.tensor.Tensor): Input data of type float16, float32, int32.
begin (Union[tuple, list]): Specifies the start index of a slice.
size (Union[tuple, list]): Specifies the size of a slice.
Returns:
tvm.tensor.Tensor, has the same type as input tensor data.
"""
shape = get_shape(data)
utils.check_shape(shape)
utils.check_equal("len(shape)", "len(begin)", len(shape), len(begin))
utils.check_equal("len(shape)", "len(size)", len(shape), len(size))
utils.ops_dtype_check(data.dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])
dim_info, _ = slice_set_dim_func(data, begin, size)
attrs = {"dim": dim_info}
out_shape = [size[i] if size[i] > 0 else shape[i] - begin[i] for i in range(len(shape))]
def slice_index(*inputs):
return [begin[i] + inputs[i] for i in range(len(inputs))]
res = akg.tvm.compute(out_shape, lambda *i: data(*slice_index(*i)), name='res')
    return res, attrs

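# The begin/size semantics above in plain numpy: a non-positive size[i] (e.g. -1) means
# "take everything from begin[i] to the end of that axis". Illustration only.
import numpy as np

data_np = np.arange(24).reshape(2, 3, 4)
begin_ex, size_ex = [0, 1, 2], [2, -1, 2]
out_shape_ex = [s if s > 0 else data_np.shape[i] - begin_ex[i] for i, s in enumerate(size_ex)]
sliced = data_np[tuple(slice(b, b + s) for b, s in zip(begin_ex, out_shape_ex))]
assert sliced.shape == (2, 2, 2)
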
def MeanAd(head, input_shape, axis, keepdims, target=utils.CCE):
"""
Compute gradient of mean operator using automatic differentiate.
Args:
head (tvm.tensor.Tensor): Input tensor.
input_shape (Union[list, tuple]): Shape of input tensor of mean operator.
axis (Union[list, tuple, int]): Specifies which axis to reduce.
keepdims (bool): Keep the reduced axis with length 1 if keepdims is true.
Returns:
tvm.tensor.Tensor.
Supported Platforms:
'Ascend'
"""
a = akg.tvm.placeholder(input_shape, head.dtype, "A")
b, _ = Mean(a, axis, keepdims, target=target)
jacs = list(akg.differentiate(b, [a], head))
    return jacs[0]

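# What the differentiated mean amounts to: every input element receives head / count,
# where count is the number of reduced elements. A tiny numpy sanity check (sketch only).
import numpy as np

x_ex = np.random.rand(2, 3).astype(np.float32)
head_ex = np.ones((2, 1), dtype=np.float32)                    # keepdims-style head for mean over axis 1
grad_ex = np.broadcast_to(head_ex / x_ex.shape[1], x_ex.shape)  # d(mean(x, axis=1))/dx
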
def matmul_execute(shape_x, shape_y, bias, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs):
'''
There are four types of fractal format in Davinci core: zZ, zN, nZ, nN
general matmul format
left_trans: False right_trans False: zZ * nZ = zN
left_trans: True right_trans False: nN * nZ = zN
left_trans: False right_trans True : zZ * zN = zN
left_trans: True right_trans True : nN * zN = zN
Now we need to support: zN * nZ = zN
    use left_format to specify the left matrix data format
    use right_format to specify the right matrix data format
'''
batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)
m = (m + 15) // 16 * 16
n = (n + 15) // 16 * 16
k = (k + 15) // 16 * 16
shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, out_format)
mod = matmul_compile(shape_x, shape_y, bias, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs)
# Generate data
m_x, m_y, bench_mark, bias_data = matmul_data(batch_tuple, m, k, n, dtype, bias_dtype, out_dtype, bias, adj_x, adj_y, left_format, right_format, out_format)
# mod launch
output = np.full(out_shape, np.nan, out_dtype)
if bias == 0:
output = utils.mod_launch(mod, (m_x, m_y, output), expect=bench_mark)
elif bias == 1:
output = utils.mod_launch(mod, (m_x, m_y, bias_data, output), expect=bench_mark)
# compare result
rtol, atol = get_rtol_atol("matmul", dtype)
compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True)
# compare_result = utils.result_compare(output, bench_mark, r_tol=5e-3)
    return (m_x, m_y), output, bench_mark, compare_result

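# The (x + 15) // 16 * 16 lines above round m, k and n up to multiples of 16, the fractal
# block size on the Davinci core. A quick arithmetic check (illustration only):
aligned = [(v + 15) // 16 * 16 for v in (1, 16, 17, 100)]
assert aligned == [16, 16, 32, 112]
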
def ZerosLike(input, target=utils.CCE):
"""
Generate an array of zeros.
Args:
input(tvm.tensor.Tensor): Tensor.
Returns:
tvm.tensor.Tensor with the same type and shape as input.
Supported Platforms:
'Ascend'
"""
dtype = input.dtype
shape = [x.value for x in input.shape]
utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])
utils.check_shape(shape)
output = akg.tvm.compute(input.shape, lambda *i: akg.tvm.const(0, input.dtype), name="output")
    return output

def TanhAd(head, in_data, target=utils.CCE):
"""
Compute gradient of tanh operator using automatic differentiate.
Args:
head (tvm.tensor.Tensor): Tensor of type float16, float32.
in_data (tvm.tensor.Tensor): Tensor of type float16, float32.
Returns:
tvm.tensor.Tensor has the same shape as input.
Supported Platforms:
'Ascend'
"""
in_dtype = in_data.dtype
# On cloud environment, cast data type from 'float16' to 'float32',
# then cast result back to 'float16', could achieve higher precision.
if in_dtype == 'float16' and not product_is_mini():
in_data = akg.topi.cast(in_data, "float32")
head = akg.topi.cast(head, "float32")
out_data = Tanh(in_data, target=target)
jacs = list(akg.differentiate(out_data, [in_data], head))
jacs_res = jacs[0]
if in_dtype == 'float16' and not product_is_mini():
jacs_res = akg.topi.cast(jacs_res, 'float16')
    return jacs_res

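# The automatic differentiation above should agree with the analytic derivative
# d tanh(x)/dx = 1 - tanh(x)^2. A numpy sketch of the expected gradient:
import numpy as np

x_np = np.random.rand(4).astype(np.float32)
head_np = np.ones(4, dtype=np.float32)
expected_grad = head_np * (1.0 - np.tanh(x_np) ** 2)
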
def matmul4d_ad(head, x, y, b, out_dtype, adj_x=False, adj_y=False):
"""compute 4d format mat shape from shape inputs."""
shape_xx = get_shape(x)
if adj_x: # no need to change in this case
shape_xx_forward = shape_xx
else:
batch_num, m_o, k_o, m_i, k_i = shape_xx
shape_xx_forward = (batch_num, k_o, m_o, k_i, m_i)
########################################
# compute the forward kernel #
########################################
x_temp = akg.tvm.placeholder(shape_xx_forward, name="input_1", dtype=x.dtype)
# we transfer all cases to that of adj_x=False
out = MatMul(x_temp, y, b, out_dtype, "zN", "nZ", "zN", False, adj_y)[0]
########################################
# compute the backward kernel #
########################################
_jacs = list(akg.differentiate(out, [x_temp], head))
if adj_x:
grad = akg.tvm.compute(shape_xx, lambda n, ko, mo, ki, mi: _jacs[0][n, ko, mo, mi, ki])
else:
grad = akg.tvm.compute(shape_xx, lambda n, mo, ko, mi, ki: _jacs[0][n, ko, mo, mi, ki])
sjacs = akg.tvm.create_schedule([grad.op])
attrs = dict()
attrs["pragma_data_transpose"] = "Y"
attrs["pragma_data_transpose_block"] = "Y"
if not adj_y:
attrs["pragma_weight_transpose"] = "Y"
    return grad, attrs

def Concat(data, axis, target=utils.CCE):
"""
Concatenates data along the dimension set by axis.
Args:
data (Union[list, tuple]): list or tuple of tvm.tensor.Tensor of type float16, float32, int32, int8, uint8
axis (int): Specifies the axis along which to concatenate. Must be in the range [-rank(data), rank(data))
Returns:
tvm.tensor.Tensor of same type as data.
"""
data_size = len(data)
    if data_size < 1:
        raise RuntimeError("The size of data must be greater than or equal to 1")
dtype = data[0].dtype
utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_TYPES)
shape_0 = data[0].shape
utils.check_shape(shape_0)
if axis < 0:
axis += len(shape_0)
for i in range(1, data_size):
shape_i = data[i].shape
utils.check_shape(shape_i)
if len(shape_i) != len(shape_0):
raise ValueError("Input tensors must have same dimensions.")
res = akg.lang.ascend.concat(data, axis)
    return res

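# The semantics of Concat match numpy's concatenate along the same axis: every dimension
# except the concat axis must agree. A tiny sketch (illustration only):
import numpy as np

a_np = np.ones((2, 3), dtype=np.float16)
b_np = np.zeros((2, 5), dtype=np.float16)
c_np = np.concatenate([a_np, b_np], axis=1)   # shape (2, 8)
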
def laplacian_of_gaussian_ad(head, x, target="cce"):
"""2nd derivative of gaussian, which should be the same as laplacian of gaussian filter."""
y = gaussian(x)
# 1st derivative
dx = list(akg.differentiate(y, [x], head))
head_fake = akg.tvm.compute(x.shape, lambda * ind: akg.tvm.const(1.0, dtype=y.dtype))
# 2nd derivative
dx2 = list(akg.differentiate(dx[0], [x], head_fake))
    return dx2[0]

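# If gaussian(x) is the unnormalized kernel exp(-x^2 / 2) (an assumption -- its definition
# is not shown here), the second derivative produced above should match the analytic
# Laplacian-of-Gaussian profile (x^2 - 1) * exp(-x^2 / 2). Sketch for comparison only:
import numpy as np

x_np = np.linspace(-3.0, 3.0, 7).astype(np.float32)
log_ref = (x_np ** 2 - 1.0) * np.exp(-x_np ** 2 / 2.0)
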
def prob_regression(x, w):
"""
Create probabilistic regression model. Potentially, make distribution one of the parameters, and allow variable
arguments
"""
assert x.shape[1].value == w.shape[1].value
pred = akg.topi.nn.dense(x, w)
model = normal_unit_var.normal_unit_var(pred)
    return model

def prob_regression_train(x, w, y):
"""
One step of training probabilistic regression
Args:
x: input
w: trained weight
y: output
Returns:
dw: change in weight
"""
model = prob_regression(x, w)
log_prob = model.log_prob(y)
neglik = akg.topi.sum(-log_prob, [0, 1])
head = akg.tvm.compute(neglik.shape,
lambda *indices:
akg.tvm.const(1.0, dtype = y.dtype))
dw = list(akg.differentiate(neglik, [w], head))
    return dw[0]

def auto_cast_of_elewise(func, *args, **kwargs):
"""
auto cast dectorator.
Note:
Before calling elewise api, check the input tensor is supported by the intr.
If not supported, casting the input tensor to supported dtype.
(On condition that the cast type is supported.
If the cast type is not supported,raising a RuntimeError).
"""
global need_save_type
intr = func.__name__
if need_save_type:
save_op_output_dtype(func, *args)
need_save_type = True
supported_types = get_intr_types("Intrinsic_" + intr)
if len(args) == 1:
temp_tensor = args[0]
dtype = temp_tensor.dtype
if dtype not in supported_types:
if "float32" in supported_types and is_cast_support(dtype, "float32"):
temp_tensor = cast(temp_tensor, "float32")
else:
temp_tensor = cast(temp_tensor, "float16")
return func(temp_tensor)
if len(args) == 2:
if isinstance(args[1], akg.tvm.tensor.Tensor):
lhs = args[0]
rhs = args[1]
# get tensor from tuple(tensor, attrs)
if isinstance(lhs, tuple):
lhs = list(lhs)[0]
dtype_l = lhs.dtype
dtype_r = rhs.dtype
lhs_t = lhs
rhs_t = rhs
if dtype_l not in supported_types or dtype_r not in supported_types or dtype_l != dtype_r:
if "float32" in supported_types and is_cast_support(dtype_l, "float32")\
and is_cast_support(dtype_r, "float32"):
lhs_t = cast(lhs, "float32")
rhs_t = cast(rhs, "float32")
else:
lhs_t = cast(lhs, "float16")
rhs_t = cast(rhs, "float16")
return func(lhs_t, rhs_t)
temp_tensor = args[0]
if isinstance(temp_tensor, tuple):
temp_tensor = list(temp_tensor)[0]
scalar = args[1]
dtype = temp_tensor.dtype
if dtype not in supported_types:
if "float32" in supported_types and is_cast_support(dtype, "float32"):
temp_tensor = cast(temp_tensor, "float32")
dtype = "float32"
else:
temp_tensor = cast(temp_tensor, "float16")
dtype = "float16"
tmp_arg = scalar
scalar_type = judge_var(scalar)
if scalar_type == "tvm_const" and scalar.dtype != dtype:
tmp_arg = akg.tvm.const(scalar.value, dtype=dtype)
if scalar_type == "python_const":
tmp_arg = akg.tvm.const(scalar, dtype=dtype)
return func(temp_tensor, tmp_arg)
if len(args) == 3:
if isinstance(args[2], akg.tvm.tensor.Tensor):
x = args[0]
y = args[1]
z = args[2]
dtype_x = x.dtype
dtype_y = y.dtype
dtype_z = z.dtype
x_t = x
y_t = y
z_t = z
if dtype_x != dtype_y or dtype_x != dtype_z or dtype_z != dtype_y:
raise RuntimeError("Input tensors must has same dtype!")
if dtype_x not in supported_types:
if "float32" in supported_types and is_cast_support(dtype_x, "float32"):
x_t = cast(x, "float32")
y_t = cast(y, "float32")
z_t = cast(z, "float32")
else:
x_t = cast(x, "float16")
y_t = cast(y, "float16")
z_t = cast(z, "float16")
return func(x_t, y_t, z_t)
lhs = args[0]
rhs = args[1]
scalar = args[2]
dtype_l = lhs.dtype
dtype_r = rhs.dtype
lhs_t = lhs
rhs_t = rhs
if dtype_l not in supported_types or dtype_r not in supported_types or dtype_l != dtype_r:
if "float32" in supported_types and is_cast_support(dtype_l, "float32")\
and is_cast_support(dtype_r, "float32"):
lhs_t = cast(lhs, "float32")
rhs_t = cast(rhs, "float32")
dtype_l = "float32"
else:
lhs_t = cast(lhs, "float16")
rhs_t = cast(rhs, "float16")
dtype_l = "float16"
tmp_arg = scalar
scalar_type = judge_var(scalar)
if scalar_type == "tvm_const" and scalar.dtype != dtype_l:
tmp_arg = akg.tvm.const(scalar.value, dtype=dtype_l)
if scalar_type == "python_const":
tmp_arg = akg.tvm.const(scalar, dtype=dtype_l)
return func(lhs_t, rhs_t, tmp_arg)
    return func(*args, **kwargs)


def vmuls(raw_tensor, scalar):
"""
multiply a tensor by a scalar, dtype of raw_tensor and scalar must be the same.
Args:
raw_tensor (tvm.tensor.Tensor): input.
scalar (Union[float, int, tvm const]): input.
Returns:
tvm.tensor.Tensor, raw_tensor * scalar.
"""
dtype = raw_tensor.dtype
    return single_elewise_op(raw_tensor, dtype, 'elewise_single_VS_mul', args=[scalar])
"""
multiply a tensor by a scalar, dtype of raw_tensor and scalar must be the same.
Args:
raw_tensor (tvm.tensor.Tensor): input.
scalar (Union[float, int, tvm const]): input.
Returns:
tvm.tensor.Tensor, raw_tensor * scalar.
"""
dtype = raw_tensor.dtype
return single_elewise_op(raw_tensor, dtype, 'elewise_single_VS_mul', args=[scalar]) |

def vadds(raw_tensor, scalar):
"""
add a tensor by a scalar, dtype of raw_tensor and scalar must be the same.
Args:
raw_tensor (tvm.tensor.Tensor): input tensor.
scalar (Union[float, int, tvm const]): input scalar.
Returns:
tvm.tensor.Tensor, raw_tensor + scalar.
"""
dtype = raw_tensor.dtype
    return single_elewise_op(raw_tensor, dtype, 'elewise_single_VS_add', args=[scalar])
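
# Usage sketch for the scalar elewise wrappers above. The placeholder shape and
# dtype are assumptions; the scalars are passed as tvm consts so that
# single_elewise_op can call .astype on them.
data = akg.tvm.placeholder((16, 16), name="data", dtype="float16")
scaled = vmuls(data, akg.tvm.const(2.0, dtype="float16"))     # data * 2.0
shifted = vadds(scaled, akg.tvm.const(1.0, dtype="float16"))  # (data * 2.0) + 1.0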
"""
add a tensor by a scalar, dtype of raw_tensor and scalar must be the same.
Args:
raw_tensor (tvm.tensor.Tensor): input tensor.
scalar (Union[float, int, tvm const]): input scalar.
Returns:
tvm.tensor.Tensor, raw_tensor + scalar.
"""
dtype = raw_tensor.dtype
return single_elewise_op(raw_tensor, dtype, 'elewise_single_VS_add', args=[scalar]) |

def single_elewise_op(input_tensor, dtype, op, args=None):
"""factory method of single elewise operations."""
in_tensor = input_tensor
shape = get_shape_from_tensor(in_tensor)
if op == "elewise_single_log":
lambda_func = lambda *indice: akg.tvm.log(in_tensor(*indice))
elif op == "elewise_single_exp":
lambda_func = lambda *indice: akg.tvm.exp(in_tensor(*indice))
elif op == "elewise_single_rec":
lambda_func = lambda *indice: akg.tvm.const(1.0, dtype) / in_tensor(*indice)
elif op == "elewise_single_VS_add":
if not len(args) == 1:
raise RuntimeError("The length of the args must be 1, but got %s" % len(args))
lambda_func = lambda *indice: in_tensor(*indice) + args[0].astype(dtype)
elif op == "elewise_single_VS_mul":
if not len(args) == 1:
raise RuntimeError("The length of the args must be 1, but got %s" % len(args))
lambda_func = lambda *indice: in_tensor(*indice) * args[0].astype(dtype)
elif op == "elewise_single_abs":
lambda_func = lambda *indice: akg.tvm.abs(in_tensor(*indice))
elif op == "elewise_single_relu":
        lambda_func = lambda *indice: akg.tvm.select(in_tensor(*indice) >= 0,
                                                     in_tensor(*indice), akg.tvm.const(0, dtype=dtype))
elif op == "elewise_single_not":
lambda_func = lambda *indice: - in_tensor(*indice)
elif op == "elewise_single_sqrt":
lambda_func = lambda *indice: akg.tvm.sqrt(in_tensor(*indice))
else:
raise RuntimeError("operation %s not support yet" % op)
name = op.split("_")[-1] + "_" + str(name_index[0])
name_index[0] += 1
with akg.tvm.tag_scope(op):
tmp = akg.tvm.compute(shape, lambda_func, name=name)
    return tmp
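
# Direct-call sketch for the factory above; these ops are normally reached
# through public wrappers such as vmuls/vadds. Shape and dtype are assumptions.
x = akg.tvm.placeholder((32,), name="x", dtype="float16")
y = single_elewise_op(x, "float16", "elewise_single_exp")  # exp(x), tagged "elewise_single_exp"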
"""factory method of single elewise operations."""
in_tensor = input_tensor
shape = get_shape_from_tensor(in_tensor)
if op == "elewise_single_log":
lambda_func = lambda *indice: akg.tvm.log(in_tensor(*indice))
elif op == "elewise_single_exp":
lambda_func = lambda *indice: akg.tvm.exp(in_tensor(*indice))
elif op == "elewise_single_rec":
lambda_func = lambda *indice: akg.tvm.const(1.0, dtype) / in_tensor(*indice)
elif op == "elewise_single_VS_add":
if not len(args) == 1:
raise RuntimeError("The length of the args must be 1, but got %s" % len(args))
lambda_func = lambda *indice: in_tensor(*indice) + args[0].astype(dtype)
elif op == "elewise_single_VS_mul":
if not len(args) == 1:
raise RuntimeError("The length of the args must be 1, but got %s" % len(args))
lambda_func = lambda *indice: in_tensor(*indice) * args[0].astype(dtype)
elif op == "elewise_single_abs":
lambda_func = lambda *indice: akg.tvm.abs(in_tensor(*indice))
elif op == "elewise_single_relu":
lambda_func = lambda *indice: akg.tvm.select(in_tensor(*indice) >=
0, in_tensor(*indice), akg.tvm.const(0, dtype=dtype))
elif op == "elewise_single_not":
lambda_func = lambda *indice: - in_tensor(*indice)
elif op == "elewise_single_sqrt":
lambda_func = lambda *indice: akg.tvm.sqrt(in_tensor(*indice))
else:
raise RuntimeError("operation %s not support yet" % op)
name = op.split("_")[-1] + "_" + str(name_index[0])
name_index[0] += 1
with akg.tvm.tag_scope(op):
tmp = akg.tvm.compute(shape, lambda_func, name=name)
return tmp |

def vmin(lhs, rhs):
"""
calculate elewise compare, return the min one.
Args:
lhs (tvm.tensor.Tensor): left hand tensor.
rhs (tvm.tensor.Tensor): right hand tensor.
Return:
tvm.tensor.Tensor, min value.
"""
return binary_elewise_op(lhs, rhs, op="elewise_binary_min") | def vmin(lhs, rhs):
"""
calculate elewise compare, return the min one.
Args:
lhs (tvm.tensor.Tensor): left hand tensor.
rhs (tvm.tensor.Tensor): right hand tensor.
Return:
tvm.tensor.Tensor, min value.
"""
return binary_elewise_op(lhs, rhs, op="elewise_binary_min") |

def vmax(lhs, rhs):
    """
    calculate elewise compare, return the max one.
    Args:
        lhs (tvm.tensor.Tensor): left hand tensor.
        rhs (tvm.tensor.Tensor): right hand tensor.
    Returns:
        tvm.tensor.Tensor, max(lhs, rhs).
    """
    return binary_elewise_op(lhs, rhs, op="elewise_binary_max")
"""
calculate elewise compare, return the min one.
Args:
lhs (tvm.tensor.Tensor): left hand tensor.
rhs (tvm.tensor.Tensor): left hand tensor.
Returns:
tvm.tensor.Tensor, max(lhs , rhs).
"""
return binary_elewise_op(lhs, rhs, op="elewise_binary_max") |

def vor(lhs, rhs):
    """
    calculate elewise or op, return the or value.
    Args:
        lhs (tvm.tensor.Tensor): left hand tensor.
        rhs (tvm.tensor.Tensor): right hand tensor.
    Returns:
        tvm.tensor.Tensor, or(lhs, rhs).
    """
    return binary_elewise_op(lhs, rhs, op="elewise_binary_or")
"""
calculate elewise or op, return the or value.
Args:
lhs (tvm.tensor.Tensor): left hand tensor.
rhs (tvm.tensor.Tensor): left hand tensor.
Returns:
tvm.tensor.Tensor, or(lhs , rhs).
"""
return binary_elewise_op(lhs, rhs, op="elewise_binary_or") |

def vand(lhs, rhs):
    """
    calculate elewise and op, return the and value.
    Args:
        lhs (tvm.tensor.Tensor): left hand tensor.
        rhs (tvm.tensor.Tensor): right hand tensor.
    Returns:
        tvm.tensor.Tensor, and(lhs, rhs).
    """
    return binary_elewise_op(lhs, rhs, op="elewise_binary_and")
"""
calculate elewise and op, return the and value.
Args:
lhs (tvm.tensor.Tensor): left hand tensor.
rhs (tvm.tensor.Tensor): left hand tensor.
Returns:
tvm.tensor.Tensor, max(lhs , rhs).
"""
return binary_elewise_op(lhs, rhs, op="elewise_binary_and") |

def vaxpy(lhs, rhs, scalar):
    """
    calculate elewise scalar * lhs + rhs.
    Args:
        lhs (tvm.tensor.Tensor): left hand tensor.
        rhs (tvm.tensor.Tensor): right hand tensor.
        scalar (tvm.tensor.Tensor): input scalar.
    Returns:
        tvm.tensor.Tensor, scalar * lhs + rhs.
    """
    return binary_elewise_op(lhs, rhs, op="elewise_binary_scalar_axpy", args=[scalar])
"""
calculate elewise scalar * lhs + rhs, return the min one.
Args:
lhs (tvm.tensor.Tensor): left hand tensor.
rhs (tvm.tensor.Tensor): left hand tensor.
scalar(tvm.tensor.Tensor): input scalar.
Returns:
tvm.tensor.Tensor, max(lhs , rhs).
"""
return binary_elewise_op(lhs, rhs, op="elewise_binary_scalar_axpy", args=[scalar]) |

def binary_elewise_op(lh_tensor, rh_tensor, op, args=None):
"""factory method of binary elewise operations."""
shape_binary_elewise_op_check(lh_tensor, rh_tensor)
if lh_tensor.dtype != rh_tensor.dtype:
raise RuntimeError("dtype must be the same while lhType is %s, rhType is %s" %
(lh_tensor.dtype, rh_tensor.dtype))
shape = lh_tensor.shape
dtype = lh_tensor.dtype
if op == "elewise_binary_add":
lambda_func = lambda *indice: lh_tensor(*indice) + rh_tensor(*indice)
elif op == "elewise_binary_sub":
lambda_func = lambda *indice: lh_tensor(*indice) - rh_tensor(*indice)
elif op == "elewise_binary_mul":
lambda_func = lambda *indice: lh_tensor(*indice) * rh_tensor(*indice)
elif op == "elewise_binary_min":
lambda_func = lambda *indice: akg.tvm.min(lh_tensor(*indice), rh_tensor(*indice))
elif op == "elewise_binary_max":
lambda_func = lambda *indice: akg.tvm.max(lh_tensor(*indice), rh_tensor(*indice))
elif op == "elewise_binary_or":
lambda_func = lambda *indice: akg.tvm.select(akg.tvm.any(lh_tensor(*indice) > 0, rh_tensor(*indice) > 0),
lh_tensor(*indice), rh_tensor(*indice))
elif op == "elewise_binary_and":
lambda_func = lambda *indice: akg.tvm.select(akg.tvm.all(lh_tensor(*indice) > 0, rh_tensor(*indice) > 0),
lh_tensor(*indice), rh_tensor(*indice))
elif op == "elewise_binary_scalar_axpy":
lambda_func = lambda *indice: args[0].astype(dtype) * lh_tensor(*indice) + rh_tensor(*indice)
else:
raise RuntimeError("operation %s not support yet" % op)
name = op.split("_")[-1] + "_" + str(name_index[0])
name_index[0] += 1
with akg.tvm.tag_scope(op):
tmp = akg.tvm.compute(shape, lambda_func, name=name)
    return tmp
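
# Routing sketch: the wrappers above (vmin, vmax, vor, vand, vaxpy) all land in
# this factory with a different op tag. Shapes and dtypes below are assumptions.
a = akg.tvm.placeholder((8, 128), name="a", dtype="float16")
b = akg.tvm.placeholder((8, 128), name="b", dtype="float16")
low = vmin(a, b)   # akg.tvm.min(a, b), tag "elewise_binary_min"
high = vmax(a, b)  # akg.tvm.max(a, b), tag "elewise_binary_max"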
"""factory method of binary elewise operations."""
shape_binary_elewise_op_check(lh_tensor, rh_tensor)
if lh_tensor.dtype != rh_tensor.dtype:
raise RuntimeError("dtype must be the same while lhType is %s, rhType is %s" %
(lh_tensor.dtype, rh_tensor.dtype))
shape = lh_tensor.shape
dtype = lh_tensor.dtype
if op == "elewise_binary_add":
lambda_func = lambda *indice: lh_tensor(*indice) + rh_tensor(*indice)
elif op == "elewise_binary_sub":
lambda_func = lambda *indice: lh_tensor(*indice) - rh_tensor(*indice)
elif op == "elewise_binary_mul":
lambda_func = lambda *indice: lh_tensor(*indice) * rh_tensor(*indice)
elif op == "elewise_binary_min":
lambda_func = lambda *indice: akg.tvm.min(lh_tensor(*indice), rh_tensor(*indice))
elif op == "elewise_binary_max":
lambda_func = lambda *indice: akg.tvm.max(lh_tensor(*indice), rh_tensor(*indice))
elif op == "elewise_binary_or":
lambda_func = lambda *indice: akg.tvm.select(akg.tvm.any(lh_tensor(*indice) > 0, rh_tensor(*indice) > 0),
lh_tensor(*indice), rh_tensor(*indice))
elif op == "elewise_binary_and":
lambda_func = lambda *indice: akg.tvm.select(akg.tvm.all(lh_tensor(*indice) > 0, rh_tensor(*indice) > 0),
lh_tensor(*indice), rh_tensor(*indice))
elif op == "elewise_binary_scalar_axpy":
lambda_func = lambda *indice: args[0].astype(dtype) * lh_tensor(*indice) + rh_tensor(*indice)
else:
raise RuntimeError("operation %s not support yet" % op)
name = op.split("_")[-1] + "_" + str(name_index[0])
name_index[0] += 1
with akg.tvm.tag_scope(op):
tmp = akg.tvm.compute(shape, lambda_func, name=name)
return tmp |

def multiple_elewise_op(x, y, z, op):
    """factory method of multiple-input elewise operations."""
shape_multi_elewise_op_check(x, y, z)
    if x.dtype != y.dtype or x.dtype != z.dtype or z.dtype != y.dtype:
        raise RuntimeError("dtypes of x, y and z must be the same")
shape = x.shape
dtype = x.dtype
if op == "elewise_multiple_mla":
lambda_func = lambda *indice: x(*indice) * y(*indice) + z(*indice)
elif op == "elewise_multiple_madd":
lambda_func = lambda *indice: x(*indice) * y(*indice) + z(*indice)
elif op == "elewise_multiple_maddrelu":
lambda_func = lambda *indice: akg.tvm.select((x(*indice) * y(*indice) + z(*indice)) >= 0,
x(*indice) * y(*indice) + z(*indice),
akg.tvm.const(0, dtype=dtype))
else:
raise RuntimeError("operation %s not support yet" % op)
name = op.split("_")[-1] + "_" + str(name_index[0])
name_index[0] += 1
with akg.tvm.tag_scope(op):
tmp = akg.tvm.compute(shape, lambda_func, name=name)
    return tmp
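
# Note: in this factory the "mla" and "madd" branches build the same x * y + z
# expression; they differ only in the op tag attached via tag_scope. Sketch
# (shapes and dtype are assumptions):
x = akg.tvm.placeholder((16, 16), name="x", dtype="float16")
y = akg.tvm.placeholder((16, 16), name="y", dtype="float16")
z = akg.tvm.placeholder((16, 16), name="z", dtype="float16")
fused = multiple_elewise_op(x, y, z, "elewise_multiple_mla")  # x * y + z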
"""factory method of binary multiple operations."""
shape_multi_elewise_op_check(x, y, z)
if x.dtype != y.dtype or x.dtype != z.dtype or z.dtype != y.dtype:
raise RuntimeError("dtype must be the same to each other")
shape = x.shape
dtype = x.dtype
if op == "elewise_multiple_mla":
lambda_func = lambda *indice: x(*indice) * y(*indice) + z(*indice)
elif op == "elewise_multiple_madd":
lambda_func = lambda *indice: x(*indice) * y(*indice) + z(*indice)
elif op == "elewise_multiple_maddrelu":
lambda_func = lambda *indice: akg.tvm.select((x(*indice) * y(*indice) + z(*indice)) >= 0,
x(*indice) * y(*indice) + z(*indice),
akg.tvm.const(0, dtype=dtype))
else:
raise RuntimeError("operation %s not support yet" % op)
name = op.split("_")[-1] + "_" + str(name_index[0])
name_index[0] += 1
with akg.tvm.tag_scope(op):
tmp = akg.tvm.compute(shape, lambda_func, name=name)
return tmp |

def BiasAdd(data1, data2, data_format, target=utils.CCE):
"""
Adds bias data2 to input tensor data1.
Args:
data1 (tvm.tensor.Tensor): Tensor of type float16, float32.
data2 (tvm.tensor.Tensor): The bias tensor, should be of same type as data1.
If shape(data2) != shape(data1), broadcast will happen.
data_format (str): Data format of input tensors, could be NC1HWC0, NHWC or DefaultFormat.
Returns:
tvm.tensor.Tensor of same shape and type as data1.
Supported Platforms:
'Ascend'
"""
utils.check_shape(data1.shape)
utils.check_shape(data2.shape)
shape1 = get_shape(data1)
shape2 = get_shape(data2)
utils.davinci_format_check(shape1, data_format)
utils.ops_dtype_check([data1.dtype, data2.dtype], utils.DtypeForDavinci.ALL_FLOAT)
if data_format == 'NC1HWC0':
data2_new = akg.lang.ascend.broadcast(data2, shape1)
res = akg.lang.ascend.vadd(data1, data2_new)
else:
if len(shape2) != 1:
raise RuntimeError("data2 should be a 1D Tensor!")
if data_format == "NHWC":
if len(shape1) != 4:
raise RuntimeError("bias_add only support 4D shape when data format is NHWC!")
c_dim_len = shape1[3]
if c_dim_len != shape2[0]:
raise ValueError("The size of bias should be equal to the channel dimension, "
" while the size of bias is {0} and the channel dimension is "
"{1}".format(shape2[0], c_dim_len))
data2_reshaped = Reshape(data2, [1, 1, 1, shape2[0]], target=utils.CCE)
elif data_format == "DefaultFormat":
if len(shape1) != 2 and len(shape1) != 4:
raise RuntimeError("bias_add only support 2D and 4D shape when data format is DefaultFormat!")
c_dim_len = shape1[1]
if c_dim_len != shape2[0]:
raise ValueError("The size of bias should be equal to the channel dimension, "
" while the size of bias is {0} and the channel dimension is "
"{1}".format(shape2[0], c_dim_len))
if len(shape1) == 2:
data2_reshaped = Reshape(data2, [1, shape2[0]], target=utils.CCE)
else:
# NCHW
data2_reshaped = Reshape(data2, [1, shape2[0], 1, 1], target=utils.CCE)
data2_new = akg.lang.ascend.broadcast(data2_reshaped, shape1)
res = akg.lang.ascend.vadd(data1, data2_new)
akg.register_variables("reshape_diff", [data2], data2_reshaped)
    return res
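
# Usage sketch for NHWC input; shapes and names are assumptions. The 1D bias is
# reshaped to (1, 1, 1, 64), broadcast to the input shape and added.
x = akg.tvm.placeholder((1, 224, 224, 64), name="x", dtype="float16")
bias = akg.tvm.placeholder((64,), name="bias", dtype="float16")
out = BiasAdd(x, bias, "NHWC")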
"""
Adds bias data2 to input tensor data1.
Args:
data1 (tvm.tensor.Tensor): Tensor of type float16, float32.
data2 (tvm.tensor.Tensor): The bias tensor, should be of same type as data1.
If shape(data2) != shape(data1), broadcast will happen.
data_format (str): Data format of input tensors, could be NC1HWC0, NHWC or DefaultFormat.
Returns:
tvm.tensor.Tensor of same shape and type as data1.
Supported Platforms:
'Ascend'
"""
utils.check_shape(data1.shape)
utils.check_shape(data2.shape)
shape1 = get_shape(data1)
shape2 = get_shape(data2)
utils.davinci_format_check(shape1, data_format)
utils.ops_dtype_check([data1.dtype, data2.dtype], utils.DtypeForDavinci.ALL_FLOAT)
if data_format == 'NC1HWC0':
data2_new = akg.lang.ascend.broadcast(data2, shape1)
res = akg.lang.ascend.vadd(data1, data2_new)
else:
if len(shape2) != 1:
raise RuntimeError("data2 should be a 1D Tensor!")
if data_format == "NHWC":
if len(shape1) != 4:
raise RuntimeError("bias_add only support 4D shape when data format is NHWC!")
c_dim_len = shape1[3]
if c_dim_len != shape2[0]:
raise ValueError("The size of bias should be equal to the channel dimension, "
" while the size of bias is {0} and the channel dimension is "
"{1}".format(shape2[0], c_dim_len))
data2_reshaped = Reshape(data2, [1, 1, 1, shape2[0]], target=utils.CCE)
elif data_format == "DefaultFormat":
if len(shape1) != 2 and len(shape1) != 4:
raise RuntimeError("bias_add only support 2D and 4D shape when data format is DefaultFormat!")
c_dim_len = shape1[1]
if c_dim_len != shape2[0]:
raise ValueError("The size of bias should be equal to the channel dimension, "
" while the size of bias is {0} and the channel dimension is "
"{1}".format(shape2[0], c_dim_len))
if len(shape1) == 2:
data2_reshaped = Reshape(data2, [1, shape2[0]], target=utils.CCE)
else:
# NCHW
data2_reshaped = Reshape(data2, [1, shape2[0], 1, 1], target=utils.CCE)
data2_new = akg.lang.ascend.broadcast(data2_reshaped, shape1)
res = akg.lang.ascend.vadd(data1, data2_new)
akg.register_variables("reshape_diff", [data2], data2_reshaped)
return res |

def dynamic_stitch(indices, data, target=utils.CCE):
"""
The values in the data tensor are interleaved into a single tensor.
Args:
indices (tvm.tensor.Tensor): Tensor of type int32.
data (tvm.tensor.Tensor): Tensor of type float16, float32, int32.
Note:
data's shape must be indices.shape + data_fragment_shape, data_fragment_shape can be empty.
Returns:
tvm.tensor.Tensor, has the same type as data.
"""
indices_shape = [x.value for x in indices.shape]
data_shape = [x.value for x in data.shape]
# Check params' shape
utils.check_shape(indices_shape)
utils.check_shape(data_shape)
# Check dtype
utils.ops_dtype_check(indices.dtype, utils.DtypeForDavinci.INT32)
utils.ops_dtype_check(data.dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])
assert indices_shape == data_shape[:len(indices_shape)]
length = 1
for x in indices_shape:
length *= x
frac_shape = data_shape[len(indices_shape):]
def get_indexes_from_flat(flat_index, shape):
indexes = []
p = 1
for x in shape:
p *= x
r = flat_index
for s in shape:
p = p // s
q = r // p
indexes.append(q)
r = r % p
return tuple(indexes)
def pick(index, s, *frac_i):
indexes = get_indexes_from_flat(index, indices_shape)
if len(frac_i) > 0:
return akg.tvm.expr.Select(s == indices[indexes], akg.tvm.const(1, data.dtype), \
akg.tvm.const(0, data.dtype)) * data[indexes + frac_i]
return akg.tvm.expr.Select(s == indices[indexes], akg.tvm.const(1, data.dtype), \
akg.tvm.const(0, data.dtype)) * data[indexes]
tmp = akg.tvm.compute([length, length] + frac_shape, lambda *i: pick(i[0], i[1], *i[2:]), name="tmp")
reduce_axis = akg.tvm.reduce_axis((0, length))
output = akg.tvm.compute([length] + frac_shape, lambda *i: akg.tvm.sum(tmp[tuple((reduce_axis,) + i)], axis=reduce_axis))
    return output
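
# Worked example of the select-and-reduce formulation above (values are
# illustrative): with indices = [2, 0, 1] and data = [10, 20, 30], output slot s
# sums data[i] over all i with indices[i] == s, giving [20, 30, 10]. Duplicate
# indices therefore accumulate into the same slot rather than overwrite it.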
"""
The values in the data tensor are interleaved into a single tensor.
Args:
indices (tvm.tensor.Tensor): Tensor of type int32.
data (tvm.tensor.Tensor): Tensor of type float16, float32, int32.
Note:
data's shape must be indices.shape + data_fragment_shape, data_fragment_shape can be empty.
Returns:
tvm.tensor.Tensor, has the same type as data.
"""
indices_shape = [x.value for x in indices.shape]
data_shape = [x.value for x in data.shape]
# Check params' shape
utils.check_shape(indices_shape)
utils.check_shape(data_shape)
# Check dtype
utils.ops_dtype_check(indices.dtype, utils.DtypeForDavinci.INT32)
utils.ops_dtype_check(data.dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])
assert indices_shape == data_shape[:len(indices_shape)]
length = 1
for x in indices_shape:
length *= x
frac_shape = data_shape[len(indices_shape):]
def get_indexes_from_flat(flat_index, shape):
indexes = []
p = 1
for x in shape:
p *= x
r = flat_index
for s in shape:
p = p // s
q = r // p
indexes.append(q)
r = r % p
return tuple(indexes)
def pick(index, s, *frac_i):
indexes = get_indexes_from_flat(index, indices_shape)
if len(frac_i) > 0:
return akg.tvm.expr.Select(s == indices[indexes], akg.tvm.const(1, data.dtype), \
akg.tvm.const(0, data.dtype)) * data[indexes + frac_i]
return akg.tvm.expr.Select(s == indices[indexes], akg.tvm.const(1, data.dtype), \
akg.tvm.const(0, data.dtype)) * data[indexes]
tmp = akg.tvm.compute([length, length] + frac_shape, lambda *i: pick(i[0], i[1], *i[2:]), name="tmp")
reduce_axis = akg.tvm.reduce_axis((0, length))
output = akg.tvm.compute([length] + frac_shape, lambda *i: akg.tvm.sum(tmp[tuple((reduce_axis,) + i)], axis=reduce_axis))
return output |

def erf_ad(head, x):
"""Compute gradient of erf operator using automatic differentiate."""
if product_is_mini():
raise RuntimeError("Not support erf_ad on mini device.")
output = erf.erf(x)
jacs = list(akg.differentiate(output, [x], head))
    return jacs[0]
"""Compute gradient of erf operator using automatic differentiate."""
if product_is_mini():
raise RuntimeError("Not support erf_ad on mini device.")
output = erf.erf(x)
jacs = list(akg.differentiate(output, [x], head))
return jacs[0] |

def _transpose_ascend(data, axes):
    """Transpose a tensor according to the given axes."""
check_list = ["float16", "float32", "int32"]
dtype = data.dtype
if not (dtype.lower() in check_list):
raise RuntimeError("transpose_cce only support %s while dtype is %s" % (",".join(check_list), dtype))
shape = [x.value for x in data.shape]
utils.check_shape(shape)
assert len(shape) == len(axes), "length of shape and axes should be same"
# if axes[-1] == len(shape) - 2:
# assert shape[-1] % 16 == 0, "transpose last axis only support 16xN number"
output = akg.topi.transpose(data, axes)
    return output
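
# Usage sketch; shape, dtype and axes are assumptions:
data = akg.tvm.placeholder((2, 3, 4), name="data", dtype="float16")
transposed = _transpose_ascend(data, (2, 0, 1))  # result shape (4, 2, 3)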
""" Transpose index from a tensor. """
check_list = ["float16", "float32", "int32"]
dtype = data.dtype
if not (dtype.lower() in check_list):
raise RuntimeError("transpose_cce only support %s while dtype is %s" % (",".join(check_list), dtype))
shape = [x.value for x in data.shape]
utils.check_shape(shape)
assert len(shape) == len(axes), "length of shape and axes should be same"
# if axes[-1] == len(shape) - 2:
# assert shape[-1] % 16 == 0, "transpose last axis only support 16xN number"
output = akg.topi.transpose(data, axes)
return output |