def FusedBnGrad1(dy, data, mean, target=utils.CCE):
"""Gradient for fused_batch_norm, reduce axis H and W."""
check_inputs(1, dy, data, mean)
dim_info = set_dim_func_bng1_(dy)[0]
attrs = attrs_bng1_.copy()
single_sum = attrs.pop("single_sum", False)
ori_dtype = dy.dtype
if ori_dtype != "float32":
dy = akg.topi.cast(dy, "float32")
data = akg.topi.cast(data, "float32")
axes = (2, 3)
dbeta_red_hw = sum_data(dy, axes, keepdims=True, single_sum=single_sum)
mean = akg.lang.ascend.broadcast(mean, data.shape)
data_minus_mean = akg.tvm.compute(
data.shape, lambda *i: data(*i) - mean(*i), "data_minus_mean")
dgamma_param = akg.tvm.compute(
data.shape, lambda *i: dy(*i) * data_minus_mean(*i), "dgamma_param")
dgamma_red_hw = sum_data(
dgamma_param, axes, keepdims=True, single_sum=single_sum)
if dim_info != "":
attrs["dim"] = dim_info
attrs["custom_tiling"] = bng1_tiling_strategy(data)
    return dgamma_red_hw, dbeta_red_hw, data_minus_mean, attrs
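
# Illustrative NumPy sketch (not part of AKG): a reference for the stage-1 reductions in
# FusedBnGrad1 above, assuming NCHW input and a per-channel mean. dbeta_red_hw sums dy over
# H and W; dgamma_red_hw sums dy * (x - mean) over H and W, keeping the reduced axes.
import numpy as np

def fused_bn_grad1_reference(dy, data, mean):
    """Hedged NumPy counterpart of FusedBnGrad1 (float32, NCHW)."""
    dy = dy.astype(np.float32)
    data = data.astype(np.float32)
    data_minus_mean = data - mean.reshape(1, -1, 1, 1)
    dbeta_red_hw = dy.sum(axis=(2, 3), keepdims=True)
    dgamma_red_hw = (dy * data_minus_mean).sum(axis=(2, 3), keepdims=True)
    return dgamma_red_hw, dbeta_red_hw, data_minus_mean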
def FusedBnGrad2(dgamma_red_hw, dbeta_red_hw, var, gamma, eps, data_shape, target=utils.CCE):
"""Second part of fused_bn_grad, reduce axis N, calculate the result of dgamma and dbeta."""
check_inputs(2, dgamma_red_hw, dbeta_red_hw, var, gamma, eps, data_shape)
attrs = attrs_bng2_.copy()
dim_info = set_dim_func_bng2_(data_shape)[0]
m = data_shape[0] * data_shape[2] * data_shape[3]
neg_m_rec = akg.tvm.const((-1.0 / m), dtype=var.dtype)
eps = akg.tvm.const(eps, var.dtype)
shape = get_shape(var)
dbeta = akg.topi.sum(dbeta_red_hw, 0, keepdims=True)
# rsqvar = 1/sqrt(var + eps)
v = akg.tvm.compute(shape, lambda *i: var(*i) + eps, name="var_plus_eps")
if product_is_mini():
v = akg.topi.cast(v, "float16")
rsqvar = akg.tvm.compute(shape,
lambda *i:
akg.tvm.exp(akg.tvm.log(v(*i)) *
akg.tvm.const(-0.5, v.dtype)),
name="rsqvar", attrs={'no_inline': 1})
if product_is_mini():
rsqvar = akg.topi.cast(rsqvar, "float32")
dgamma_red_n = akg.topi.sum(dgamma_red_hw, 0, keepdims=True)
dgamma = akg.tvm.compute(shape,
lambda *i: dgamma_red_n(*i) * rsqvar(*i),
name="dgamma")
# rs = gamma / sqrt(var+eps)
rs = akg.tvm.compute(shape,
lambda *i: gamma(*i) * rsqvar(*i),
name="rs", attrs={'no_inline': 1})
rs_div_m = akg.tvm.compute(shape,
lambda *i: rs(*i) * neg_m_rec,
name="rs_div_m", attrs={'no_inline': 1})
dgamma_dx = akg.tvm.compute(shape,
lambda *i:
rs_div_m(*i) * rsqvar(*i) * dgamma(*i),
name="dgamma_dx")
dbeta_dx = akg.tvm.compute(shape,
lambda *i: rs_div_m(*i) * dbeta(*i),
name="dbeta_dx")
if dim_info != "":
attrs["dim"] = dim_info
    return dgamma, dbeta, rs, dgamma_dx, dbeta_dx, attrs
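
# Illustrative NumPy sketch (not part of AKG): the stage-2 formulas of FusedBnGrad2 above,
# written out for per-channel var/gamma of shape (1, C, 1, 1) and m = N * H * W.
import numpy as np

def fused_bn_grad2_reference(dgamma_red_hw, dbeta_red_hw, var, gamma, eps, data_shape):
    n, _, h, w = data_shape
    m = n * h * w
    rsqvar = 1.0 / np.sqrt(var + eps)          # 1 / sqrt(var + eps)
    dbeta = dbeta_red_hw.sum(axis=0, keepdims=True)
    dgamma = dgamma_red_hw.sum(axis=0, keepdims=True) * rsqvar
    rs = gamma * rsqvar                        # gamma / sqrt(var + eps)
    rs_div_m = rs * (-1.0 / m)
    dgamma_dx = rs_div_m * rsqvar * dgamma
    dbeta_dx = rs_div_m * dbeta
    return dgamma, dbeta, rs, dgamma_dx, dbeta_dx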
def _truncatemod_compute_mini(x, y):
"""
Computes truncatemod value of x and y on mini device.
Args:
x(tvm.tensor.Tensor): Tensor, float16.
y(tvm.tensor.Tensor): Tensor with same type as x.
Returns:
tvm.tensor.Tensor of same type as x.
"""
def truncatemod_positive(x_abs, y_abs):
"""Computes truncatemod value for positive number"""
x_abs_fp32 = akg.topi.cast(x_abs, "float32")
y_abs_fp32 = akg.topi.cast(y_abs, "float32")
def truncatemod_func(a, b):
"""function for truncatemod formula"""
# For positive numbers, floor and trunc are equivalent
return akg.topi.subtract(a, akg.topi.multiply(b, Cast(Floor(Divide(a, b, utils.CCE)), b.dtype, target=utils.CCE)))
mod_value = truncatemod_func(x_abs_fp32, y_abs_fp32)
# Because there are precision errors in division on mini, etc.,
# the calculation results need to be corrected
mod_value = truncatemod_func(mod_value, y_abs_fp32)
mod_value = akg.topi.cast(mod_value, "float16")
mod_value = akg.tvm.compute(mod_value.shape,
lambda *indice: akg.tvm.expr.Select(mod_value(*indice) >= y_abs(*indice),
mod_value(*indice) - y_abs(*indice),
mod_value(*indice)),
name="mod_value")
return mod_value
_, _, out_shape = produce_shapes(utils.get_shape(x), utils.get_shape(y))
x = akg.topi.broadcast_to(x, out_shape)
y = akg.topi.broadcast_to(y, out_shape)
# Scenarios for correcting calculation results are complex,
# using absolute values can simplify the scenario:
# truncatemod(x,y) = Sign(x) * truncatemod(|x|, |y|)
mod_abs = truncatemod_positive(akg.topi.abs(x), akg.topi.abs(y))
mod = akg.topi.multiply(akg.topi.sign(x), mod_abs)
    return mod
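
# Illustrative NumPy check (not part of AKG) of the identity used above:
# truncatemod(x, y) == sign(x) * truncatemod(|x|, |y|), where np.fmod is the truncated
# (C-style) modulo that the mini-device kernel reproduces.
import numpy as np

x = np.array([-7.0, 7.0, -7.0, 5.5], dtype=np.float32)
y = np.array([3.0, -3.0, -3.0, 2.0], dtype=np.float32)
assert np.allclose(np.fmod(x, y), np.sign(x) * np.fmod(np.abs(x), np.abs(y)))  # [-1., 1., -1., 1.5]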
def truncatemod_positive(x_abs, y_abs):
"""Computes truncatemod value for positive number"""
x_abs_fp32 = akg.topi.cast(x_abs, "float32")
y_abs_fp32 = akg.topi.cast(y_abs, "float32")
def truncatemod_func(a, b):
"""function for truncatemod formula"""
# For positive numbers, floor and trunc are equivalent
return akg.topi.subtract(a, akg.topi.multiply(b, Cast(Floor(Divide(a, b, utils.CCE)), b.dtype, target=utils.CCE)))
mod_value = truncatemod_func(x_abs_fp32, y_abs_fp32)
# Because there are precision errors in division on mini, etc.,
# the calculation results need to be corrected
mod_value = truncatemod_func(mod_value, y_abs_fp32)
mod_value = akg.topi.cast(mod_value, "float16")
mod_value = akg.tvm.compute(mod_value.shape,
lambda *indice: akg.tvm.expr.Select(mod_value(*indice) >= y_abs(*indice),
mod_value(*indice) - y_abs(*indice),
mod_value(*indice)),
name="mod_value")
    return mod_value
def reduce_any_d(x, axis=None, keepdims=False):
"""
Reduce a tensor on a certain axis based on max.
Args:
x (tvm.tensor.Tensor): The input tensor to reduce. Should be of type int8.
axis (Union[list, tuple, int, None]): The dimensions to reduce. If None, all dimensions will be reduced.
each dim must be in the range [-len(data.shape), len(data.shape) - 1].
keepdims (Union[bool, None]): If True, retains reduced dimensions with length 1, defaults to False.
Returns:
tvm.tensor.Tensor of same type as input tensor x.
"""
# check type
utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.INT8)
utils.check_shape(x.shape)
# check axis
utils.reduce_axis_check(x.shape, axis)
refined_axis = refine_reduce_axis(x, axis)
if len(set(refined_axis)) == len(x.shape) and not keepdims:
keepdims = True
res = _reduce_any_d_compute(x, refined_axis, keepdims)
if len(set(refined_axis)) == len(x.shape):
res = topi.reshape(res, (1, ))
    return res
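
# Illustrative NumPy sketch (not part of AKG): assuming the int8 input encodes booleans as
# 0/1, "any" along an axis equals the max along that axis, which is the reduction that
# _reduce_any_d_compute relies on.
import numpy as np

x = np.array([[0, 0, 1], [0, 0, 0]], dtype=np.int8)
assert np.array_equal(x.max(axis=1), np.any(x, axis=1).astype(np.int8))  # [1, 0]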
def space_to_batch_nd(data, block_shape, paddings):
"""
The N-D version of SpaceToBatch.
Zero padding, then rearranging spatial data blocks into batch.
Args:
        data (tvm.tensor.Tensor): Spatial data of type float16, float32, int8, uint8, int32.
block_shape (Union[tuple, list]): 1-D shape of length `L`.
paddings (Union[tuple, list]): 2-D list of shape `[L][2]`, all values must be greater than or equal to 0.
Returns:
        tvm.tensor.Tensor, has the same type as inputs.
"""
check_inputs(data, block_shape, paddings)
dim_info, _ = space_to_batch_nd_set_dim_func(data, block_shape, paddings)
attrs = {"dim": dim_info}
block_shape = list(block_shape)
pad_before = []
pad_after = []
n = len(data.shape)
m = len(block_shape)
_get_pad_before_and_after(n, m, paddings, pad_before, pad_after)
prod_of_block_shape = reduce(lambda x, y: x * y, block_shape)
data_shape_padded = nn.pad(data, pad_before, pad_after)
M = len(block_shape)
batch = data_shape_padded.shape[0]
spatial_shape = data_shape_padded.shape[1:1 + M]
remain_shape = data_shape_padded.shape[1 + M:]
oshape = [batch * prod_of_block_shape] + \
[dim // bsize for dim, bsize in zip(spatial_shape, block_shape)] + remain_shape
def map_index(*index):
ibatch = index[0] % batch
ispatial = list(index[1:1 + M])
iremain = list(index[1 + M:])
coef = index[0] // batch
for i in reversed(range(M)):
ispatial[i] = coef % block_shape[i] + index[1 + i] * block_shape[i]
coef = coef // block_shape[i]
return [ibatch] + ispatial + iremain
output = akg.tvm.compute(oshape, lambda *i: data_shape_padded(*map_index(*i)), name='output')
    return output, attrs
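
# Illustrative NumPy sketch (not part of AKG): the same pad + block-to-batch rearrangement,
# assuming TF-style space_to_batch_nd semantics, which map_index above reproduces (block
# offsets become the most-significant part of the output batch index).
import numpy as np

def space_to_batch_nd_reference(data, block_shape, paddings):
    block_shape = list(block_shape)
    m = len(block_shape)
    pad_width = [(0, 0)] + [tuple(p) for p in paddings] + [(0, 0)] * (data.ndim - 1 - m)
    padded = np.pad(data, pad_width)
    batch, spatial, remain = padded.shape[0], padded.shape[1:1 + m], padded.shape[1 + m:]
    split = [batch]
    for dim, b in zip(spatial, block_shape):
        split += [dim // b, b]                 # each spatial dim becomes (outer, block)
    x = padded.reshape(split + list(remain))
    block_axes = [2 + 2 * i for i in range(m)]
    outer_axes = [1 + 2 * i for i in range(m)]
    remain_axes = list(range(1 + 2 * m, x.ndim))
    x = x.transpose(block_axes + [0] + outer_axes + remain_axes)
    out_batch = batch * int(np.prod(block_shape))
    return x.reshape([out_batch] + [d // b for d, b in zip(spatial, block_shape)] + list(remain))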
def Cast(data, dst_type, target=utils.CCE):
"""
cast data to target type.
Args:
data (tvm.tensor.Tensor): Tensor to be casted.
dst_type (str): target cast type.
Returns:
tvm.tensor.Tensor, type is dst_type.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
if target == utils.CCE:
return _cast_ascend(data, dst_type)
else:
        return _cast(data, dst_type)
def ReduceOr(inputs, axis=None, keepdims=False, target=utils.CCE):
"""
Compute the logical or of elements across dimensions of a tensor.
Args:
inputs (tvm.tensor.Tensor): Tensor.
        axis (Union[list, tuple, int, None]): The dimensions to reduce. An empty list or tuple is treated as None.
        keepdims (bool): If True, the result keeps the same number of dimensions as the input.
    Returns:
        tvm.tensor.Tensor, has same type as input. If keepdims is True, all reduced dimensions are retained
        with length 1; otherwise the reduced axes are eliminated.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
axis = refine_reduce_axis(inputs, axis)
utils.check_shape(inputs.shape)
output = akg.topi.any(inputs, axis=axis, keepdims=keepdims)
    return output
def prelu_grad(dy, A, w):
"""
    brief Computes the backward gradients of prelu for a tensor.
\f[
dw = sum(dy * \\partial(prelu(A)) / \\partial w)
dA = A > 0 ? dy : dy * w
\f]
param inputs akg.tvm.Tensor of type float16, float32
return akg.tvm.Tensor of same type and shape as inputs
"""
shape = [x.value for x in dy.shape]
dtype = dy.dtype
shape1 = [x.value for x in A.shape]
dtype1 = A.dtype
shape2 = [x.value for x in w.shape]
dtype2 = w.dtype
    assert len(shape) == 4, "only support 4-dim input"  # NCHW
    assert len(shape1) == 4, "only support 4-dim input"  # NCHW
    assert len(shape2) == 1, "only support 1-dim w"
    assert (shape2[0] == shape1[1] or shape2[0] == 1), "w must have a single value or one value per input channel. Default: 1"
    assert (shape[0] == shape1[0] and shape[1] == shape1[1] and shape[2] == shape1[2] and shape[3] == shape1[3]), "shapes of dy and A must be equal"
    check_list = ["float16", "float32"]
    if not (dtype1.lower() in check_list and dtype2.lower() in check_list and dtype.lower() in check_list):
        raise RuntimeError("prelu_grad only supports %s while dtypes are %s, %s and %s" % (",".join(check_list), dtype, dtype1, dtype2))
utils.check_shape(shape)
utils.check_shape(shape1)
utils.check_shape(shape2)
def grad_dsl():
w_reshape = akg.topi.reshape(w, (1, shape2[0], 1, 1))
w_broadcast = akg.topi.broadcast_to(w_reshape, shape1)
dA = akg.tvm.compute(shape,
lambda *i: akg.tvm.if_then_else(
A(*i) >= akg.tvm.const(0, dtype),
dy(*i), dy(*i) * w_broadcast(*i)
))
# dy * \partial(prelu(A)) / \partial w
dw_intermediate = akg.tvm.compute(shape,
lambda *i: akg.tvm.if_then_else(
A(*i) >= akg.tvm.const(0, dtype),
akg.tvm.const(0, dtype), dy(*i) * A(*i)
))
# hybrid accuracy: sum use float32, other use fp16
# if dtype.lower() is not "float32":
# dw_intermediate = akg.topi.cast(dw_intermediate, "float32")
if shape2[0] == 1:
# all channel share one w
#dw = akg.topi.sum(dw_intermediate)
dw = akg.topi.sum(dw_intermediate, axis=3)
dw = akg.topi.sum(dw, axis=2)
dw = akg.topi.sum(dw, axis=1)
dw = akg.topi.sum(dw, axis=0)
# dw = akg.topi.sum(dw_intermediate, axis=1)
# dw = akg.topi.sum(dw, axis=2)
# dw = akg.topi.sum(dw, axis=1)
# dw = akg.topi.sum(dw, axis=0)
#dw = akg.tvm.compute(shape, lambda *indice: akg.tvm.sum(dw_intermediate(*indice), axis=[0,1,2,3]), name="dw")
#dw = akg.lang.ascend.sum(dw_intermediate, axis=3, keepdims=False)
#dw = akg.lang.ascend.sum(dw_intermediate, axis=2, keepdims=False)
#dw = akg.lang.ascend.sum(dw_intermediate, axis=1, keepdims=False)
#dw = akg.lang.ascend.sum(dw_intermediate, axis=0, keepdims=False)
else:
# all channel use separate w
# dw = akg.topi.sum(dw_intermediate, axis=[0,2,3]) # Accuracy is not up to standard
dw = akg.topi.sum(dw_intermediate, axis=3)
dw = akg.topi.sum(dw, axis=2)
dw = akg.topi.sum(dw, axis=0)
# dw = akg.topi.sum(dw_intermediate, axis=1)
# dw = akg.topi.sum(dw, axis=1)
# dw = akg.topi.sum(dw, axis=0)
# hybrid accuracy: sum use float32, other use fp16
# if dtype.lower() is not "float32":
# dw = akg.topi.cast(dw, "float16")
return dA, dw
attrs = {"pragma_checkcoincident": 0, "pragma_modshift": 1}
    return grad_dsl(), attrs
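
# Illustrative NumPy sketch (not part of AKG) of the gradients computed above for NCHW
# input, with w holding either one shared slope or one slope per channel.
import numpy as np

def prelu_grad_reference(dy, a, w):
    w_b = w.reshape(1, -1, 1, 1)                   # broadcast over N, H, W
    da = np.where(a >= 0, dy, dy * w_b)
    dw_full = np.where(a >= 0, np.zeros_like(dy), dy * a)
    dw = dw_full.sum() if w.size == 1 else dw_full.sum(axis=(0, 2, 3))
    return da, dw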
def apply_ada_max_run(shape, dtype, epsilon, attrs=None):
"""run function for dsl function apply_ada_max."""
shapes = [shape, shape, shape, shape, (1,), (1,), (1,), (1,)]
dtypes = [dtype] * len(shapes)
op_attrs = [epsilon]
mod = utils.op_build_test(apply_ada_max, shapes, dtypes,
op_attrs=op_attrs, kernel_name="apply_ada_max", attrs=attrs)
inputs, expects, args = gen_data(shape, dtype, epsilon)
outputs = utils.mod_launch(mod, args, outputs=(0, 1, 2), expect=expects)
    rtol, atol = get_rtol_atol("apply_ada_max", dtype)
results = list(map(lambda x, y: compare_tensor(x, y, rtol=rtol, atol=atol), outputs, expects))
    return inputs, outputs, expects, all(results)
def gen_data(shape, dtype, epsilon):
"""Generate data for testing the op."""
var = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
m = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
v = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
grad = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
lr = np.random.rand(1).astype(dtype)
beta1 = np.random.rand(1).astype(dtype)
beta2 = np.random.rand(1).astype(dtype)
beta1_power = beta1 * beta1
inputs = [var, m, v, grad, lr, beta1, beta1_power, beta2]
one = np.array([1]).astype(dtype)
epsilon = np.array([epsilon]).astype(dtype)
out_m = beta1 * m + (one - beta1) * grad
out_v = np.maximum(beta2 * v, np.abs(grad))
out_var = var - lr * out_m / ((one - beta1_power) * (out_v + epsilon))
expects = [out_var, out_m, out_v]
args = inputs
    return inputs, expects, args
def Exp(data, target=utils.CCE):
"""
Calculate exponential of input data.
Args:
        data (tvm.tensor.Tensor): Tensor.
Returns:
tvm.tensor.Tensor, has the same type as input.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
if target == utils.CCE:
return _exp_ascend(data)
else:
        return _exp(data)
def produce_shapes(shape1, shape2):
"""two input shapes produce three output shape."""
shape1 = list(shape1)
shape2 = list(shape2)
flag = 0
if len(shape1) < len(shape2):
shape1, shape2 = shape2, shape1
flag = 1
output_shape_len = len(shape1)
dec = output_shape_len - len(shape2)
for i in range(dec):
shape2 = [1] + shape2
out_shape = []
for i in range(output_shape_len):
if (shape1[i] != shape2[i]) and (shape1[i] != 1) and (shape2[i] != 1):
raise RuntimeError("input shapes not match!")
if isinstance(shape1[i], int) and isinstance(shape2[i], int) and shape1[i] > shape2[i]:
out_shape.append(shape1[i])
else:
out_shape.append(shape2[i])
if flag == 1:
shape1, shape2 = shape2, shape1
    return shape1, shape2, out_shape
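
# Hedged usage example (not part of AKG), assuming produce_shapes as defined above is in
# scope: the shorter shape is left-padded with 1s and the broadcast shape comes last.
s1, s2, out = produce_shapes([2, 3, 4], [3, 1])
# s1 == [2, 3, 4], s2 == [1, 3, 1], out == [2, 3, 4]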
def mul_axis_sum(data, axes, keepdims, name=None, attrs=None):
"""calculate sum one by one."""
if name is None and attrs is None:
for axis in axes:
data = akg.topi.sum(data, axis=axis, keepdims=keepdims)
else:
shape = [x.value for x in data.shape]
for axis in axes[:-1]:
data = akg.topi.sum(data, axis=axis, keepdims=keepdims)
l_axis = shape[axes[-1]]
k = akg.tvm.reduce_axis((0, l_axis), name="k")
res_shape = [1 if i in axes else shape[i] for i in range(len(shape))]
def sumfunc(*i):
new_i = list(i)
new_i[axes[-1]] = k
return akg.tvm.sum(data(*tuple(new_i)), axis=k)
if name is None:
data = akg.tvm.compute(res_shape, sumfunc, attrs=attrs)
elif attrs is None:
data = akg.tvm.compute(res_shape, sumfunc, name=name)
else:
data = akg.tvm.compute(res_shape, sumfunc, name=name, attrs=attrs)
    return data
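
# Illustrative NumPy check (not part of AKG): summing one axis at a time, as mul_axis_sum
# does, matches a single multi-axis sum with keepdims.
import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
step = x.sum(axis=2, keepdims=True).sum(axis=1, keepdims=True)
assert np.array_equal(step, x.sum(axis=(1, 2), keepdims=True))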
def cal_pad_shapes_by_strategy(shape, kernel, stride, strategy):
"""
Calculate the pad size and output shape by padding strategy.
Args:
shape (Union[list, tuple]): Input shape, a list or tuple of 5 int numbers.
kernel (Union[list, tuple]): List or tuple of two int numbers for pooling window's size.
stride (Union[list, tuple]): List or tuple of two int numbers for window's stride.
strategy (Union[str, list]): A string or list for padding strategy, should be 'VALID',
'SAME' or instance of list(including four int numbers, as 'CONSTANTS' strategy).
Returns:
pad_sizes: Padding sizes(a list of four int numbers: [H_head_pad, H_tail_pad, W_head_pad, W_tail_pad]).
out_shape: Output tensor's shape(a list of two int numbers: [output_H, output_W]).
"""
pool_shapes = [shape[2], shape[3]]
out_shape = []
pad_sizes = []
contrain_var = False
for sh in [shape, kernel, stride]:
for s in sh:
if not isinstance(s, (int, akg.tvm.expr.IntImm)):
contrain_var = True
if isinstance(strategy, str) and strategy.upper() == "VALID":
for i in range(2):
out_shape.append(math.ceil((pool_shapes[i] - (kernel[i] - 1)) / stride[i]))
if out_shape[i] <= 0:
raise ValueError("With pad mode {0}, the value of the kernel "
"(or window) size should be less than or "
"equal to that of the corresponding input "
"shape!".format(strategy))
pad_sizes += [0, 0] # for h
pad_sizes += [0, 0] # for w
elif isinstance(strategy, str) and strategy.upper() == "SAME":
for i in range(2):
out_shape.append(math.ceil(pool_shapes[i] / stride[i]))
diff_shape = ((out_shape[i] - 1) * stride[i] + kernel[i]) - pool_shapes[i]
diff_shape = diff_shape if diff_shape > 0 else 0
pad_shape = [math.floor(diff_shape / 2), math.ceil(diff_shape / 2)]
pad_sizes += pad_shape
elif isinstance(strategy, (list, tuple)):
if len(strategy) != 4:
raise RuntimeError(
"When with strateg 'CONSTANTS', strategy should be list or tuple of 4 int numbers but get {}".
format(strategy))
vc_util.check_pad('pad', strategy, 4)
for i in range(2):
pad_shape = [strategy[i * 2], strategy[i * 2 + 1]]
if contrain_var:
out_shape.append(akg.tvm.floordiv((pool_shapes[i] +
(pad_shape[0] + pad_shape[1]) - kernel[i]), (stride[i])) + 1)
else:
out_shape.append(math.floor((pool_shapes[i] +
(pad_shape[0] + pad_shape[1]) - kernel[i]) / float(stride[i])) + 1)
pad_sizes += pad_shape
height, width = out_shape
if (isinstance(height, int) and height <= 0) or (isinstance(width, int) and width <= 0):
raise ValueError("The height and witdth of calculated output"
" shape [{}, {}] are invalid. Please check the "
"input parameters!".format(height, width))
else:
raise RuntimeError("Padding strategies only support 'VALID', 'CONSTANTS' or 'SAME', but get {}".
format(strategy))
    return pad_sizes, out_shape
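
# Hedged worked example (not part of AKG), assuming cal_pad_shapes_by_strategy as defined
# above is in scope and an N C1 H W C0 shape where shape[2], shape[3] are H and W:
pads, out = cal_pad_shapes_by_strategy([1, 16, 224, 224, 16], [3, 3], [2, 2], "SAME")
# out == [112, 112]; diff = (112 - 1) * 2 + 3 - 224 = 1, so pads == [0, 1, 0, 1]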
def broadcast_gradient_args(x, y):
"""
Return the reduction indices for computing gradients of x op y with broadcast.
Args:
x (Union[list, tuple]): the shape of data input
y (Union[list, tuple]): the shape of data input
Returns:
rx (list): the reduction indices for computing gradients of x
ry (list): the reduction indices for computing gradients of y
"""
rx = []
ry = []
for i, item in enumerate(x):
if item < y[i]:
rx.append(i)
elif item > y[i]:
ry.append(i)
    return rx, ry
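
# Hedged usage example (not part of AKG), assuming broadcast_gradient_args as defined
# above is in scope and both shapes already have the same rank:
rx, ry = broadcast_gradient_args([2, 1, 4], [2, 3, 1])
# rx == [1] (x was broadcast along axis 1), ry == [2] (y was broadcast along axis 2)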
def ReduceProd(data, axis=None, keepdims=False, target=utils.CCE):
"""
Computes the product of elements along specific axis
Args:
data (tvm.tensor.Tensor): indicating the input tensor.
axis (Union[list, tuple, int, None]): indicating the dimensions to reduce at. if it's None, all dimensions
will be reduced.
keepdims (Union[bool, None]): if true, keep the dimensions with length 1.
Returns:
Tensor, the product of elements of input tensor.
Supported Platforms:
'Ascend', 'GPU'
"""
utils.check_supported_target(target)
shape = [x.value for x in data.shape]
utils.ops_dtype_check(data.dtype, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT8, utils.DtypeForDavinci.UINT8])
if axis is None and keepdims is False:
raise ValueError("keepdims must be True when axis is None!")
axis_new = refine_reduce_axis(data, axis)
if target == utils.CUDA:
return akg.topi.prod(data, axis=axis, keepdims=keepdims)
utils.check_shape(shape)
dtype = data.dtype
if dtype in ["int8", "uint8"]:
data = akg.topi.cast(data, "float16")
vlog_t = Log(data, target)
res = akg.topi.sum(vlog_t, axis=axis_new, keepdims=keepdims)
res = Exp(res, target)
if dtype in ["int8", "uint8"]:
res = akg.topi.cast(res, dtype)
    return res
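
# Illustrative NumPy check (not part of AKG): the exp(sum(log(x))) trick used above equals
# the product for strictly positive data, which is why the Ascend path reduces log values.
import numpy as np

x = np.array([[1.5, 2.0, 4.0], [0.5, 3.0, 2.0]], dtype=np.float32)
assert np.allclose(np.exp(np.log(x).sum(axis=1)), x.prod(axis=1))  # [12., 3.]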
def sigmoid(data, target="cce"):
"""
Computes sigmoid of x element-wise.
\f[
y = \frac{1}{e^{-x} + 1}
\f]
Args:
data (tvm.tensor.Tensor): Tensor of type float16, float32.
Returns:
tvm.tensor.Tensor, has same type and shape as data.
"""
check_list = ["float16", "float32"]
dtype = data.dtype
if not dtype in check_list:
raise RuntimeError("sigmoid_cce only support %s while dtype is %s" % (",".join(check_list), dtype))
shape = data.shape
utils.check_shape(shape)
res = vrec(vadds(vexp(vmuls(data, -1.0)), 1.0))
    return res
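
# Illustrative NumPy reference (not part of AKG) for the vrec(vadds(vexp(vmuls(x, -1)), 1))
# chain above:
import numpy as np

def sigmoid_reference(x):
    return 1.0 / (np.exp(-x) + 1.0)    # y = 1 / (e^(-x) + 1)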
def launch(kernel, args, output=(-1,)):
"""
    Simulate running a CCE kernel on the AIC model.
Args:
kernel (str): str of kernel name, or CCE Module.
args (Union[list, tuple]): list or tuple of numpy array.
        output (Union[list, tuple]): list or tuple of output argument indices.
Returns:
output numpy array, or tuple of numpy array if multi-output.
"""
if isinstance(kernel, akg.tvm.module.Module):
code = kernel.imported_modules[0].get_source()
kernel_name = code.split("_kernel")[0].split(" ")[-1]
else:
kernel_name = kernel
hbm_addr = 0x4000000
hbm_unit = 0x1000000
aic_model_path = os.getenv('AIC_MODEL_PATH')
if not aic_model_path:
msg = "AIC_MODEL_PATH environment variable is not set. Please set it to the dir of model_exe"
raise RuntimeError(msg)
aic_model_path = os.path.realpath(aic_model_path)
# spec : target chip specification.
spec_name = os.getenv('AIC_MODEL_SPEC_NAME')
if not spec_name:
msg = "AIC_MODEL_SPEC_NAME environment variable is not set. Please set it to the name of spec(" \
"It should be xxx.spec and the xxx.spec file is under the AIC_MODEL_PATH directory)"
raise RuntimeError(msg)
aic_out_path = os.path.realpath("aic_out")
if not os.path.exists(aic_out_path):
os.mkdir(aic_out_path)
calog_path = aic_out_path + "/calog"
if not os.path.exists(calog_path):
os.mkdir(calog_path)
model_path = aic_out_path + "/model"
if not os.path.exists(model_path):
subprocess.call(["ln", "-s", aic_model_path + "/model", model_path])
kernel_meta_path = get_kernel_meta_path()
kernel_meta_realpath = os.path.realpath(kernel_meta_path)
if not os.path.exists(kernel_meta_realpath):
msg = "The parameter kernel_meta_realpath can not be found, please check"
raise RuntimeError(msg)
o_name = kernel_meta_realpath + "/" + kernel_name + ".o"
bin_name = aic_out_path + "/kernel.bin"
subprocess.call(["aicore-elf-objcopy", "-O", "binary", "-j", ".text", o_name, bin_name])
load_dict = {}
with open("%s/%s.json" % (kernel_meta_realpath, kernel_name), "r") as f:
load_dict = json.load(f)
arg_info = [] # [{"bin": "xx.bin", "out" : False, "size":100, "addr": 200},]
desc = {"args": arg_info,
"para_addr": hbm_addr,
"bin_addr": hbm_addr + 0x100000,
"bin": "kernel.bin",
"block": load_dict["blockDim"],
"spec": aic_model_path + '/' + spec_name,
"path": aic_out_path}
hbm_addr += hbm_unit
for i, arg in enumerate(args):
bin_name = "a_%d.bin" % (i)
arg.tofile(os.path.join(aic_out_path, bin_name))
info = {"bin": bin_name,
"size": arg.size * arg.dtype.itemsize,
"addr": hbm_addr,
"out": False}
arg_info.append(info)
need_size = arg.size
if need_size % hbm_unit:
need_size += hbm_unit - (need_size % hbm_unit)
hbm_addr += need_size
for i in output:
arg_info[len(arg_info) + i if i < 0 else i]['out'] = True
config_path = aic_out_path + "/config.toml"
if os.path.exists(config_path):
os.remove(config_path)
with os.fdopen(os.open(config_path, os.O_WRONLY | os.O_CREAT, 0o400), 'w') as f:
f.write('title="Sim Config"\n')
f.write('log_open_value=0xffffffff\n')
f.write('chip_version=1\n')
f.write('block_dim=%d\n' % (desc['block']))
f.write('specPathName="%s"\n' % (desc["spec"]))
f.write('path="%s/"\n' % (desc["path"]))
f.write('hbm_para_addr=0x%x\n' % (desc["para_addr"]))
f.write('[BIN]\n')
f.write('name="%s"\n' % (desc['bin']))
f.write('addr=0x%x\n' % (desc['bin_addr']))
for arg in arg_info:
f.write('[[output_para_array]]\n' if arg['out'] else '[[input_para_array]]\n')
f.write('name="%s"\n' % (arg['bin']))
f.write('addr=0x%x\n' % (arg['addr']))
f.write('valid=1\n')
if arg['out']:
f.write('size=0x%x\n' % (arg['size']))
run_path = aic_out_path + "/run.sh"
if os.path.exists(run_path):
os.remove(run_path)
with os.fdopen(os.open(run_path, os.O_WRONLY | os.O_CREAT, 0o500), 'w') as f:
f.write("cd " + aic_out_path + "\n")
f.write("export DVCSPEC_DIR=" + aic_model_path + "\n")
f.write(aic_model_path + "/v100_ca_tag_master --gtest_filter=test_st_case.test_st_ca\n")
subprocess.call(["sh", aic_out_path + "/run.sh"])
out_list = []
for i, arg_ in enumerate(args):
if arg_info[i]['out']:
out_data = np.fromfile(os.path.join(aic_out_path, arg_info[i]['bin']), arg_.dtype)
if out_data.size > args[i].size: # strip unneeded data copied back by aic model
out_data = out_data[0:arg_.size]
out_arg = out_data.reshape(arg_.shape)
out_list.append(out_arg)
    return out_list[0] if len(out_list) == 1 else tuple(out_list)
def StandardNormal(seed, shape, target=utils.CUDA):
"""
Operator dsl function for standard_normal.
Args:
seed (int): Random seed.
shape (tuple(int)): Output shape.
Returns:
Tensor with the given shape.
Supported Platforms:
'GPU'
"""
    return cuda_standard_normal(None, {"seed": seed, "shape": shape})
def intrin_load_im2col(dtype, stride_w, stride_h, kernel_w, kernel_h, dilation_h=1, dilation_w=1):
'''
Create intrin function call for im2col
Args:
dtype: type of the data
Return:
cce intrin function call for im2col
'''
input_w = akg.tvm.var('input_width')
input_h = akg.tvm.var('input_height')
pad_left = akg.tvm.var('padding_left')
pad_right = akg.tvm.var('padding_right')
pad_top = akg.tvm.var('padding_top')
pad_bottom = akg.tvm.var('padding_bottom')
w_idx_kernel = akg.tvm.var('fetch_position_inside_kernel_width')
h_idx_kernel = akg.tvm.var('fetch_position_inside_kernel_height')
h_idx = akg.tvm.var('kernel_h_index_in_input')
w_idx = akg.tvm.var('kernel_w_index_in_input')
window_size = 16
input_b = 1
input_c1 = 1
input_c0 = 16
input_data = akg.tvm.placeholder(
(input_b, input_c1, input_h, input_w, input_c0), dtype=dtype)
result = akg.tvm.compute((window_size, input_c0),
lambda window, c0:
input_data[0,
0,
h_idx + h_idx_kernel + pad_bottom,
w_idx + w_idx_kernel + pad_left + window*stride_w,
c0],
name='img2col_intrinsic')
input_data_buff = akg.tvm.decl_buffer(input_data.shape, input_data.dtype,
name="input_data_buff",
offset_factor=1, scope="local.L1")
result_buff = akg.tvm.decl_buffer(result.shape, result.dtype,
name="result_buff",
offset_factor=1, scope="local.UB")
def intrin_func(ins, outs, sp):
c1_idx = 0
offset = 1
mode = 0
time = 1
csize = 0
aa = ins[0]
bb = outs[0]
ib = akg.tvm.ir_builder.create()
call_args = [sp[0], sp[1], sp[2], sp[3], sp[4], sp[5],
sp[6], sp[7], sp[8], sp[9], c1_idx,
stride_w, stride_h, kernel_w, kernel_h, dilation_w, dilation_h,
offset, mode, time,
csize]
ib.emit(akg.tvm.call_extern(dtype, "img2col_cbuf_to_ub",
bb.access_ptr("w"),
aa.access_ptr("r"),
*call_args))
return ib.get()
with akg.tvm.build_config(offset_factor=1):
return akg.tvm.decl_tensor_intrin(result.op,
intrin_func,
binds={
input_data: input_data_buff, result: result_buff},
scalar_params=[input_w, input_h, pad_left, pad_right, pad_top, pad_bottom,
                                                         w_idx_kernel, h_idx_kernel, w_idx, h_idx])
def im2col_manual_schedule(data, kernel, stride, pad, target="cce"):
'''
Compute im2col via cce im2col intrin function call directly
Args:
data (akg.tvm.tensor.Tensor): Tensor of type float16, float32.
kernel (Union[list, tuple]): List or tuple of two int numbers for pooling window's size.
stride (Union[list, tuple]): List or tuple of two int numbers for window's stride.
pad (Union[List, tuple]): List or tuple of four int numbers for padding(top, bottom, left, and right).
Return:
        akg.tvm.tensor.Tensor of same type as data, in zN fractal layout.
'''
b, c1, h, w, c0 = data.shape
stride_h, stride_w = stride
kernel_h, kernel_w = kernel
pad_t, pad_b, pad_l, pad_r = pad
# output size <=> number of windows
ho = (h + pad_b + pad_t - kernel_h) // stride_h + 1
wo = (w + pad_r + pad_l - kernel_w) // stride_w + 1
load_im2col = intrin_load_im2col(
data.dtype, stride_w, stride_h, kernel_w, kernel_h)
im2col_shape = (b,
(ho * wo + BLOCK_SIZE - 1) // BLOCK_SIZE,
c1 * kernel_h * kernel_w,
BLOCK_SIZE,
c0)
def _im2col_compute(i, j, k, data):
j_h = (((j * BLOCK_SIZE) // wo) * stride_h) - pad_t
j_w = (((j * BLOCK_SIZE) % wo) * stride_w) - pad_l
h_3d = kernel_h - akg.tvm.max(((j_h + kernel_h) - h), 0)
pad_t_3d = akg.tvm.max(-j_h, 0)
pad_b_3d = akg.tvm.max(((j_h + kernel_h) - h), 0)
w_idx_kernel = (k % kernel_w)
h_idx_kernel = ((k // kernel_w) % kernel_h)
w_idx = j_w
h_idx = akg.tvm.min(j_h, 0)
c1_idx = (k // kernel_w) // kernel_h
load_im2col_input = data[i,
c1_idx,
akg.tvm.max(j_h, 0):akg.tvm.min(j_h + kernel_h, h),
0:w,
0:c0]
return load_im2col(load_im2col_input,
w, h_3d, pad_l, pad_r, pad_t_3d, pad_b_3d,
w_idx_kernel, h_idx_kernel, w_idx, h_idx)
# assume we need the whole width of a
# choose a section of the rows of a that encompasses all of the windows in the current window-batch
res = akg.tvm.compute(im2col_shape,
lambda i, j, k:
_im2col_compute(i, j, k, data),
name='im2col_fractal')
def comp_func(s):
data_l1 = s.cache_read(data, "local.L1", [res])
res_ub = s.cache_write(res, "local.UB")
b_ax, hw1_ax, c1_kh_kw_ax, hw0_ax, c0_ax = res.op.axis
hw1_out = hw1_ax
if akg.tvm.all([wo > BLOCK_SIZE]):
cut_w1 = wo // BLOCK_SIZE
cut_h1 = 1
cut_hw1 = cut_h1 * cut_w1
hw1_out, hw1_in = s[res].split(hw1_ax, cut_hw1)
s[data_l1].compute_at(s[res], hw1_out)
s[res_ub].compute_at(s[res], c1_kh_kw_ax)
    return res, comp_func
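A minimal plain-NumPy sketch (illustrative only, not AKG code) of the same window arithmetic: it reproduces the ho/wo formulas above and gathers one padded patch per output position; all names here are made up.

import numpy as np

def im2col_ref(x, kernel, stride, pad):
    """Plain-NumPy im2col for a single (H, W) plane; illustrative only."""
    kh, kw = kernel
    sh, sw = stride
    pt, pb, pl, pr = pad
    h, w = x.shape
    ho = (h + pt + pb - kh) // sh + 1
    wo = (w + pl + pr - kw) // sw + 1
    xp = np.pad(x, ((pt, pb), (pl, pr)))          # zero padding, as the intrin assumes
    cols = np.empty((ho * wo, kh * kw), dtype=x.dtype)
    for i in range(ho):
        for j in range(wo):
            patch = xp[i * sh:i * sh + kh, j * sw:j * sw + kw]
            cols[i * wo + j] = patch.ravel()
    return cols

x = np.arange(16, dtype=np.float32).reshape(4, 4)
print(im2col_ref(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1, 1, 1)).shape)  # (16, 9)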
Python | def find_best(self, model, num, exclusive, n_iter=None):
"""find best configs based on simulated annealing"""
tic = time.time()
temp, early_stop, log_interval = self.temp, self.early_stop, self.log_interval
if n_iter is None:
n_iter = self.n_iter
if self.persistent and self.points is not None:
points = self.points
else:
points = np.random.choice(self.space.length, self.parallel_size)
# change points in space to configs
configs = self.get_configs(points)
scores = 1e8 / model.predict(configs, x_type="config")
# build heap and insert initial points
heap_items = [(float('-inf'), -i) for i in range(num)]
heapq.heapify(heap_items)
in_heap = set(exclusive)
in_heap.update([-i for i in range(num)])
for s, p in zip(scores, points):
if s > heap_items[0][0] and p not in in_heap:
pop = heapq.heapreplace(heap_items, (s, p))
in_heap.remove(pop[1])
in_heap.add(p)
k = 0
k_last_modify = 0
if isinstance(temp, (tuple, list, np.ndarray)):
t = temp[0]
cool = 1.0 * (temp[0] - temp[1]) / (n_iter + 1)
else:
t = temp
cool = 0
while k < n_iter and k < k_last_modify + early_stop:
new_points = np.empty_like(points)
for i, p in enumerate(points):
new_points[i] = self.space.random_walk(p)
new_configs = self.get_configs(new_points)
new_scores = 1e8 / model.predict(new_configs, x_type="config")
ac_prob = np.exp(np.minimum((new_scores - scores) / (t + 1e-5), 1))
ac_index = np.random.random(len(ac_prob)) < ac_prob
points[ac_index] = new_points[ac_index]
scores[ac_index] = new_scores[ac_index]
for s, p in zip(new_scores, new_points):
if s > heap_items[0][0] and p not in in_heap:
pop = heapq.heapreplace(heap_items, (s, p))
in_heap.remove(pop[1])
in_heap.add(p)
k_last_modify = k
k += 1
t -= cool
if log_interval and k % log_interval == 0:
t_str = "%.2f" % t
logger.debug("SA iter: %d\tlast_update: %d\tmax-0: %.2f\tmax-1: %.2f\ttemp: %s\t"
"elapsed: %.2f",
k, k_last_modify, heap_items[0][0],
np.max([v for v, _ in heap_items]), t_str,
time.time() - tic)
heap_items.sort(key=lambda item: -item[0])
logger.debug("SA iter: %d\tlast_update: %d\tmax-0: %.2f\tmax-1: %.2f\telapsed: %.2f",
k, k_last_modify, heap_items[-1][0], heap_items[0][0], time.time() - tic)
logger.debug("SA Maximums: %s", heap_items)
if self.persistent:
self.points = points
    return [x[1] for x in heap_items]
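For intuition, a dependency-free toy version of the same annealing loop (illustrative only; the real searcher walks a tiling config space and additionally keeps a heap of the best points):

import numpy as np

def toy_sa(score_fn, space_size, n_iter=200, temp=(1.0, 0.0), parallel=8, seed=0):
    """Minimal simulated annealing over integer points, mirroring the accept rule above."""
    rng = np.random.default_rng(seed)
    points = rng.integers(0, space_size, parallel)
    scores = score_fn(points)
    t, cool = temp[0], (temp[0] - temp[1]) / (n_iter + 1)
    best_p, best_s = points[np.argmax(scores)], scores.max()
    for _ in range(n_iter):
        new_points = np.clip(points + rng.integers(-3, 4, parallel), 0, space_size - 1)
        new_scores = score_fn(new_points)
        ac_prob = np.exp(np.minimum((new_scores - scores) / (t + 1e-5), 1))
        accept = rng.random(parallel) < ac_prob          # always accept improvements, sometimes accept worse
        points[accept], scores[accept] = new_points[accept], new_scores[accept]
        if scores.max() > best_s:
            best_p, best_s = points[np.argmax(scores)], scores.max()
        t -= cool
    return best_p, best_s

print(toy_sa(lambda p: -np.abs(p - 700).astype(float), space_size=1000))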
Python | def concat(raw_tensors, axis):
"""
    Concatenate the input tensors along the given axis; supports int8, uint8, int16, int32, float16, float32.
Args:
raw_tensors (list[tvm.tensor.Tensor]): list of tensors
axis (int): concat axis
Returns:
concat tensor
"""
concat_para_check(raw_tensors, axis)
def _get_input_tensors():
shapes = []
for in_tensor in list(raw_tensors):
shape = [int(in_tensor.shape[i].value) for i in range(len(in_tensor.shape))]
shapes.append(shape)
shapes_list = list(shapes)
return shapes_list
shapes = _get_input_tensors()
res_shape = shapes[0][:]
for i in range(1, len(shapes)):
res_shape[axis] += shapes[i][axis]
sel = []
n_tensor = len(raw_tensors)
def compute_func(*indice):
if n_tensor > 1:
for nn in range(n_tensor - 1):
if nn == 0:
tensor_a = raw_tensors[0]
tensor_b = raw_tensors[1]
c_shape = shapes[0][:]
indice2 = list(indice[:])
indice2[axis] = indice[axis] - tensor_a.shape[axis]
sel.append(akg.tvm.expr.Select(indice[axis] < c_shape[axis],
tensor_a[indice], tensor_b[tuple(indice2)]))
c_shape[axis] += shapes[1][axis]
else:
tensor_a = sel[nn - 1]
tensor_b = raw_tensors[nn + 1]
indice2 = list(indice[:])
indice2[axis] = indice[axis] - c_shape[axis]
sel.append(akg.tvm.expr.Select(indice[axis] < c_shape[axis], tensor_a, tensor_b[tuple(indice2)]))
c_shape[axis] += shapes[nn + 1][axis]
else:
return raw_tensors[0][indice]
return sel[-1]
res = akg.tvm.compute(res_shape, compute_func, name="concat", tag="concat")
    return res
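The chained Select above implements what the NumPy reference below does with running offsets: each output index along `axis` falls inside the extent of exactly one input (sketch with illustrative names):

import numpy as np

def concat_ref(arrays, axis):
    """Index-based concat: mirrors the Select logic by offsetting into each input."""
    out_shape = list(arrays[0].shape)
    out_shape[axis] = sum(a.shape[axis] for a in arrays)
    out = np.empty(out_shape, dtype=arrays[0].dtype)
    offset = 0
    for a in arrays:
        idx = [slice(None)] * out.ndim
        idx[axis] = slice(offset, offset + a.shape[axis])
        out[tuple(idx)] = a          # same role as Select(index < offset + extent, ...)
        offset += a.shape[axis]
    return out

a = np.ones((2, 3), np.float16)
b = np.full((2, 2), 2, np.float16)
assert np.array_equal(concat_ref([a, b], axis=1), np.concatenate([a, b], axis=1))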
Python | def add_ad(head, a, b, scale, target="cce"):
"""Compute gradient of add operator using automatic differentiate."""
output = Add(a, b, scale, target=target)
jacs = list(akg.differentiate(output, [a], head))
    return jacs[0]
Python | def genData(shape_dz, shape_x, shape_y, grad_x, grad_y, op_type, dtype):
""" Generate data for testing the op """
shapes = [shape_x, shape_y, shape_dz]
inputs = []
for i in range(len(shapes)):
shape = shapes[i]
input = random_gaussian(shape, miu=1, sigma=0.1 + i * 0.1).astype(dtype)
inputs.append(input)
input_x = np.broadcast_to(inputs[0], shape_dz)
input_y = np.broadcast_to(inputs[1], shape_dz)
    if op_type == "LE":
dx = np.where(input_x <= input_y, inputs[2], 0).astype(dtype)
dy = np.where(input_x <= input_y, 0, inputs[2]).astype(dtype)
    elif op_type == "GE":
dx = np.where(input_x >= input_y, inputs[2], 0).astype(dtype)
dy = np.where(input_x >= input_y, 0, inputs[2]).astype(dtype)
dx = broadcast_grad(dx, shape_x)
dy = broadcast_grad(dy, shape_y)
outs = []
if grad_x and grad_y:
outs = [dx, dy]
elif grad_x and grad_y is False:
outs = dx
elif grad_y and grad_x is False:
outs = dy
    return inputs, outs
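A tiny standalone check (made-up values) of the masking used above: every element of the upstream gradient dz is routed to exactly one of dx, dy depending on the comparison:

import numpy as np

x = np.array([1.0, 3.0, 2.0], np.float32)
y = np.array([2.0, 2.0, 2.0], np.float32)
dz = np.array([0.1, 0.2, 0.3], np.float32)

# Gradient routing for an LE-type op: dz goes to dx where x <= y, to dy elsewhere.
dx = np.where(x <= y, dz, 0).astype(np.float32)
dy = np.where(x <= y, 0, dz).astype(np.float32)
assert np.allclose(dx + dy, dz)   # each upstream element reaches exactly one input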
Python | def detection_five2four(data, box_num, target=utils.CCE):
"""
Change data from five dims to specific four dims format.
Shape changes: [N, ceil((box_num * 4) / 16), H, W, 16] -> [N, box_num * H * W, 4, 1].
Note:
        With detection_five2four + concat, tensors with
        shape [16, 16//16, 38, 38, 16], [16, 24//16+1, 19, 19, 16],
        [16, 24//16+1, 10, 10, 16], [16, 24//16+1, 5, 5, 16], [16, 16//16, 3, 3, 16]
        and [16, 16//16, 1, 1, 16] can be combined into one tensor with shape [16, 8732, 4, 1].
Args:
        data (Tensor): tvm.Tensor of type float16 in five-dim format whose
            third and fourth dims have equal length (H == W).
box_num (Integer): number of box.
Returns:
        A tvm.Tensor with a 4-dim shape whose last dim has length 1.
"""
utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.FLOAT16)
block_size = 16
batch_size, c1, wh, _, c0 = data.shape
# each box has 4 numbers
pad = (box_num * 4) % block_size
@script(capture=locals())
def reshape(inputs):
out = allocate((batch_size, wh * wh, box_num * 4), 'float16', 'local')
for i in range(batch_size):
for j in range(c1):
for k in range(wh):
for l in range(wh):
for m in range(c0):
out[i, k * wh + l, j * c0 + m] = inputs[i, j, k, l, m]
return out
@script(capture=locals())
def reshape_with_pad(inputs):
out = allocate((batch_size, wh * wh, box_num * 4), 'float16', 'local')
for i in range(batch_size):
for j in range(box_num // 4 + 1):
for k in range(wh):
for l in range(wh):
if j == box_num // 4:
for m1 in range(pad):
out[i, k * wh + l, j * block_size + m1] = inputs[i, j, k, l, m1]
else:
for m in range(c0):
out[i, k * wh + l, j * block_size + m] = inputs[i, j, k, l, m]
return out
if pad == 0:
data_rs = reshape(data)
else:
data_rs = reshape_with_pad(data)
    return data_rs
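When pad == 0, the hybrid `reshape` script above is equivalent to a transpose plus reshape; a NumPy sketch on small illustrative shapes:

import numpy as np

n, c1, wh, c0 = 2, 1, 3, 16          # box_num = 4, so c1 = box_num * 4 / 16 = 1
data = np.random.rand(n, c1, wh, wh, c0).astype(np.float16)

# out[i, k*wh + l, j*c0 + m] = data[i, j, k, l, m]  ==  transpose to (n, h, w, c1, c0), then flatten
ref = data.transpose(0, 2, 3, 1, 4).reshape(n, wh * wh, c1 * c0)
loop = np.empty_like(ref)
for j in range(c1):
    for k in range(wh):
        for l in range(wh):
            loop[:, k * wh + l, j * c0:(j + 1) * c0] = data[:, j, k, l, :]
assert np.array_equal(ref, loop)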
Python | def apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power, target=utils.CCE):
"""
Ftrl-proximal optimization algorithm.
Note:
accum_new = accum + grad * grad
linear_new = linear + grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
x = clip(linear_new, -l1, l1) - linear_new
y = accum_new^(-lr_power) / lr + 2 * l2
var_new = x / y
Args:
var (tvm.tensor.Tensor): The tensor to be updated. Should be float16 or float32.
        accum (tvm.tensor.Tensor): A tensor of same shape and type as var. Each entry in it must be
            greater than or equal to zero.
linear (tvm.tensor.Tensor): A tensor of same shape and type as var.
grad (tvm.tensor.Tensor): A tensor of same shape and type as var.
lr (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
l1 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
l2 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
lr_power (tvm.tensor.Tensor): A scalar tensor of the same type as `var`. Value of it
            must be less than or equal to zero.
Returns:
tvm.tensor.Tensor, updated var.
tvm.tensor.Tensor, updated accum.
tvm.tensor.Tensor, updated linear.
"""
    # The vlog instruction on the mini product has a precision problem, and the mini product is used
    # for inference rather than training.
if product_is_mini():
raise RuntimeError("The apply_ftrl operator does not support the mini product")
# check_shape
utils.check_shape(var)
shape = get_shape(var)
for tensor in (accum, linear, grad):
utils.elemwise_shape_check(shape, tensor.shape)
    scalar_shape = (1,)
    for scalar in (lr, l1, l2, lr_power):
        utils.elemwise_shape_check(scalar.shape, scalar_shape)
# check dtype
dtype = var.dtype
utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.FLOAT16, utils.DtypeForDavinci.FLOAT32])
for tensor in (var, accum, linear, grad, lr, l1, l2, lr_power):
utils.elemwise_dtype_check(tensor.dtype, dtype)
var_new, accum_new, linear_new = apply_ftrl_impl(var, accum, linear, grad, lr, l1, l2, None,
lr_power, with_l2_shrinkage=False)
# update by inplace
(var_new, accum_new, linear_new), binds_info = \
TensorUtils.inplace_set_tensors((var, accum, linear), (var_new, accum_new, linear_new))
attrs = {utils.BINDS: binds_info}
    return var_new, accum_new, linear_new, attrs
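A NumPy transcription (a sketch only, not the Davinci kernel) of the formulas in the Note block above, handy for sanity-checking the implementation numerically:

import numpy as np

def ftrl_ref(var, accum, linear, grad, lr, l1, l2, lr_power):
    """Elementwise FTRL step following the Note block above."""
    accum_new = accum + grad * grad
    linear_new = linear + grad - (accum_new ** (-lr_power) - accum ** (-lr_power)) / lr * var
    x = np.clip(linear_new, -l1, l1) - linear_new
    y = accum_new ** (-lr_power) / lr + 2 * l2
    var_new = x / y
    return var_new, accum_new, linear_new

v, a, lin, g = (np.full(4, 0.5, np.float32) for _ in range(4))
print(ftrl_ref(v, a, lin, g, lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5))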
Python | def depthwise(data, N, H, W, CI, k_ch, KH, KW, PAD_H, PAD_W, SH, SW, block_size, use_bias=False):
"""
    Depthwise 5-D convolution; every channel has its own filter kernel.
Args:
        data (list): a list of size 3 if use_bias else of size 2;
              data[0] tvm.tensor.Tensor of type float16, shape 5D (N, CI//C0, H, W, C0)
              data[1] tvm.tensor.Tensor of type float16, shape 6D (CI//(CI//C0)//C0, KH, KW, k_ch*CI//C0, C0, C0)
              data[2] tvm.tensor.Tensor of type float16, shape 5D (N, CI*k_ch//C0, OH, OW, C0)
N (int): batchsize
H (int): height of featureMap
W (int): width of featureMap
CI (int): channel of featureMap
k_ch (int): channel of Filter
KH (int): height of Filter
KW (int): width of Filter
PAD_H (int): padding pixels in vertical direction
PAD_W (int): padding pixels in horizontal direction
SH (int): stride in vertical direction
SW (int): stride in horizontal direction
        block_size (int): an int, also called "C0"
        use_bias (bool): if True, add bias; otherwise bias is zero.
Returns:
akg.tvm.Tensor of same type as data, shape is 5D(N, CI*k_ch//C0, OH, OW, C0)
"""
check_list = ["float16"]
dtype = data[0].dtype
if not (dtype in check_list):
raise RuntimeError("depthwise only support %s while dtype is %s" % (",".join(check_list), dtype))
for i in range(len(data)):
shape = data[i].shape
utils.check_shape(shape)
conv_dtype = 'float16'
group = CI // block_size
CO = CI * k_ch
assert k_ch == 1
assert CO % group == 0 and CI % group == 0
assert CO % block_size == 0 and (CI // group) % block_size == 0
clear = False # if clear, use auto tiling
    # (N, CI, H, W) -> (N, C1, H, W, C0)
A = data[0]
# (CO, CI // group, KH, KW) -> (CI // group // block * KH * KW, CO // block, block, block)
B = data[1]
if use_bias:
bias = data[2]
bias_name = bias.op.name
else:
bias = None
bias_name = "bias_name"
key = [N, H, W, CI, k_ch, KH, KW, PAD_H, PAD_W, SH, SW]
hash_key = str((tuple(key)))
if hash_key in depthwise_set_dim_map:
cutH, cutCo, cutM, cutK, cutN = depthwise_set_dim_map[hash_key]
else:
# raise RuntimeError("other can not find cutH, cutCo, cutM, cutK, cutN")
cutH = (KH - 1) * KH + 1
cutCo = 16
cutM = 16
cutK = 16 * KH * KW
cutN = 16
clear = True # use auto tiling
OH = (H + 2 * PAD_H - KH) // SH + 1
OW = (W + 2 * PAD_W - KW) // SW + 1
kc1 = akg.tvm.reduce_axis((0, CI // block_size // group), name="kc1")
kh = akg.tvm.reduce_axis((0, KH), name="kh")
kw = akg.tvm.reduce_axis((0, KW), name="kw")
kc0 = akg.tvm.reduce_axis((0, block_size), name="kc0")
p_top, p_bottom, p_left, p_right = PAD_H, PAD_H, PAD_W, PAD_W
output_name = "output"
output_bias_name = "output_bias"
attr = {
"pragma_conv_kernel_n": CO,
"pragma_conv_kernel_h": KH,
"pragma_conv_kernel_w": KW,
"pragma_conv_padding_top": p_top,
"pragma_conv_padding_bottom": p_bottom,
"pragma_conv_padding_left": p_left,
"pragma_conv_padding_right": p_right,
"pragma_conv_bypass_l1": 1,
"pragma_conv_stride_h": SH,
"pragma_conv_stride_w": SW,
"pragma_conv_fm_n": N,
"pragma_conv_fm_c": CI,
"pragma_conv_fm_h": H,
"pragma_conv_fm_w": W,
"pragma_conv_dilation_h": 1,
"pragma_conv_dilation_w": 1,
"feature": A.op.name,
"filter": B.op.name,
"bias": bias_name,
"res": output_name,
"res_bias": output_bias_name
}
if not clear:
attr["pragma_conv_h_cut"] = cutH
attr["pragma_conv_w_cut"] = W + 2 * PAD_W
attr["pragma_conv_co_cut"] = cutCo
attr["pragma_conv_m_cut"] = cutM
attr["pragma_conv_k_cut"] = cutK
attr["pragma_conv_n_cut"] = cutN
C = akg.tvm.compute((N, CO // block_size, OH, OW, block_size),
lambda n, c1, h, w, c0: akg.lang.ascend.mmad(
akg.tvm.if_then_else(akg.tvm.any((h * SH + kh) < p_top, (h * SH + kh) > (H + p_top - 1),
(w * SW + kw) < p_left, (w * SW + kw) > (W + p_left - 1)),
akg.tvm.const(0.0, conv_dtype),
A[n, c1 // ((CO // block_size) // group) * (
(CI // block_size) // group) + kc1, (
h * SH + kh - p_top), (w * SW + kw - p_left), kc0])
# A[n, kc1, (h * SH + kh - p_top), (w * SW + kw - p_left), kc0])
* B[(kc1 * KH + kh) * KW + kw, c1, c0, kc0], axis=[kc1, kh, kw, kc0]),
attrs=attr, name=output_name)
if use_bias:
out = akg.tvm.compute(C.shape, lambda n, c1, h, w, c0: C[n, c1, h, w, c0] + bias[0, c1, 0, 0, c0],
name=output_bias_name)
else:
out = C
    return out
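For reference, a plain-NumPy depthwise convolution on NCHW data with one kernel per channel (an illustrative sketch; it only mirrors the k_ch == 1 case asserted above):

import numpy as np

def depthwise_ref(x, w, stride, pad):
    """x: (N, C, H, W); w: (C, KH, KW); each channel is filtered by its own kernel."""
    n, c, h, wd = x.shape
    _, kh, kw = w.shape
    sh, sw = stride
    ph, pw = pad
    oh = (h + 2 * ph - kh) // sh + 1          # same output-size formula as OH above
    ow = (wd + 2 * pw - kw) // sw + 1
    xp = np.pad(x, ((0, 0), (0, 0), (ph, ph), (pw, pw)))
    out = np.zeros((n, c, oh, ow), x.dtype)
    for i in range(oh):
        for j in range(ow):
            patch = xp[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw]
            out[:, :, i, j] = (patch * w[None]).sum(axis=(2, 3))
    return out

x = np.random.rand(1, 4, 5, 5).astype(np.float32)
w = np.random.rand(4, 3, 3).astype(np.float32)
print(depthwise_ref(x, w, stride=(1, 1), pad=(1, 1)).shape)  # (1, 4, 5, 5)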
Python | def gen_data(begin, begin_mask, dtype, ellipsis_mask, end, end_mask, new_axis_mask, shape, shrink_axis_mask,
strides):
""" Generate data for testing the op """
input = np.random.uniform(low=-1.0, high=1.0, size=tuple(shape)).astype(dtype)
# get numpy result
slices = args_to_slices(begin, end, strides,
begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
expect = input[tuple(slices)]
out_shape = expect.shape if expect.shape != (0,) else (1,)
output = np.full(out_shape, np.nan, dtype)
    return expect, input, output
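args_to_slices (defined elsewhere in the test utilities) ultimately yields Python slice objects; ignoring the mask arguments, the mapping is simply this (hedged sketch, names are illustrative):

import numpy as np

def simple_slices(begin, end, strides):
    """Build Python slice objects from begin/end/strides lists (no mask handling)."""
    return tuple(slice(b, e, s) for b, e, s in zip(begin, end, strides))

x = np.arange(24).reshape(2, 3, 4)
print(x[simple_slices(begin=[0, 1, 0], end=[2, 3, 4], strides=[1, 1, 2])].shape)  # (2, 2, 2)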
Python | def apply_ftrl_v2(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, target=utils.CCE):
"""
Ftrl-proximal optimization algorithm with l2_shrinkage.
Note:
grad_shrinkage = grad + 2 * l2_shrinkage * var
accum_new = accum + grad * grad
linear_new = linear + grad_shrinkage - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
x = clip(linear_new, -l1, l1) - linear_new
y = accum_new^(-lr_power) / lr + 2 * l2
var_new = x / y
Args:
var (tvm.tensor.Tensor): The tensor to be updated. Should be float16 or float32.
        accum (tvm.tensor.Tensor): A tensor of same shape and type as var. Each entry in it must be
            greater than or equal to zero.
linear (tvm.tensor.Tensor): A tensor of same shape and type as var.
grad (tvm.tensor.Tensor): A tensor of same shape and type as var.
lr (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
l1 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
l2 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
l2_shrinkage (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
lr_power (tvm.tensor.Tensor): A scalar tensor of the same type as `var`. Value of it
            must be less than or equal to zero.
Returns:
tvm.tensor.Tensor, updated var.
tvm.tensor.Tensor, updated accum.
tvm.tensor.Tensor, updated linear.
"""
    # The vlog instruction on the mini product has a precision problem, and the mini product is used
    # for inference rather than training.
if product_is_mini():
raise RuntimeError("The apply_ftrl_v2 operator does not support the mini product")
# check_shape
utils.check_shape(var)
shape = get_shape(var)
for tensor in (accum, linear, grad):
utils.elemwise_shape_check(shape, tensor.shape)
    scalar_shape = (1,)
    for scalar in (lr, l1, l2, l2_shrinkage, lr_power):
        utils.elemwise_shape_check(scalar.shape, scalar_shape)
# check dtype
dtype = var.dtype
utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.FLOAT16, utils.DtypeForDavinci.FLOAT32])
for tensor in (var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power):
utils.elemwise_dtype_check(tensor.dtype, dtype)
var_new, accum_new, linear_new = apply_ftrl_impl(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
with_l2_shrinkage=True)
# update by inplace
(var_new, accum_new, linear_new), binds_info = TensorUtils.\
inplace_set_tensors((var, accum, linear), (var_new, accum_new, linear_new))
attrs = {utils.BINDS: binds_info}
    return var_new, accum_new, linear_new, attrs
Python | def modify_common_constraints(value, constraint, level=TileLevel.C1):
"""api for dsl to modify some default constraint used in auto tiling."""
if constraint not in TileConstraint:
raise ValueError("Tile constraints must be chosen from {0}".format(TileConstraint))
if constraint == TileConstraint.SET_MEM_RATIO:
return create_custom_tiling_node(TileMode.COMMON, tile_level=level, mem_ratio=double(value))
if constraint == TileConstraint.THREAD_MIN:
return create_custom_tiling_node(TileMode.COMMON, thread_min=value)
if constraint == TileConstraint.THREAD_MAX:
return create_custom_tiling_node(TileMode.COMMON, thread_max=value)
if constraint == TileConstraint.THREAD_MOD:
return create_custom_tiling_node(TileMode.COMMON, thread_mod=value)
if constraint == TileConstraint.BLOCK_MIN:
return create_custom_tiling_node(TileMode.COMMON, block_min=value)
if constraint == TileConstraint.BLOCK_MAX:
return create_custom_tiling_node(TileMode.COMMON, block_max=value)
if constraint == TileConstraint.BLOCK_MOD:
return create_custom_tiling_node(TileMode.COMMON, block_mod=value)
raise TypeError("Constraint {} is not supported in this api, please use other api"
                    .format(constraint.value))
Python | def create_constraint_on_axis(values, constraints, band=0, axis=None, level=TileLevel.C1):
"""api for dsl to create tiling constraints on certain axis."""
if constraints not in TileConstraint:
raise ValueError("Tile constraints must be chosen from {0}".format(TileConstraint))
res = []
if axis is None:
axis = [i for i in range(len(values))]
elif not isinstance(axis, (int, list, tuple)):
raise TypeError("Axis should be int, list or tuple")
if isinstance(axis, int):
axis = [axis]
if isinstance(values, (str, int)):
values = [values]
    elif not isinstance(values, (list, tuple)):
        raise TypeError("Tiling factor must be string, int or a list/tuple of them, "
                        "but received {}".format(type(values)))
if len(axis) != len(values):
raise ValueError("Length of axis must equal to length of values")
for a, v in zip(axis, values):
if constraints == TileConstraint.MIN:
res.append(create_custom_tiling_node(TileMode.AXIS, tile_level=level,
tile_band=band, tile_axis=a, tile_min=v))
elif constraints == TileConstraint.MOD:
res.append(create_custom_tiling_node(TileMode.AXIS, tile_level=level,
tile_band=band, tile_axis=a, tile_mod=v))
elif constraints == TileConstraint.FACTOR:
res.append(create_custom_tiling_node(TileMode.AXIS, tile_level=level,
tile_band=band, tile_axis=a, tile_factor=v))
elif constraints == TileConstraint.CANDIDATE:
res.append(create_custom_tiling_node(TileMode.AXIS, tile_level=level,
tile_band=band, tile_axis=a, tile_candidate=v))
elif constraints == TileConstraint.MAX:
res.append(create_custom_tiling_node(TileMode.AXIS, tile_level=level,
tile_band=band, tile_axis=a, tile_max=v))
elif constraints == TileConstraint.FORBID_ISOLATE:
res.append(create_custom_tiling_node(TileMode.AXIS, tile_level=level,
tile_band=band, tile_axis=a, forbid_isolate=v))
elif constraints == TileConstraint.SET_AXIS_INFO:
res.append(create_custom_tiling_node(TileMode.AXIS, tile_level=level,
tile_band=band, tile_axis=a, axis_info=v))
elif constraints == TileConstraint.SET_PRIORITY:
res.append(create_custom_tiling_node(TileMode.AXIS, tile_level=level,
tile_band=band, tile_axis=a, priority=v))
else:
raise TypeError("Constraint {} is not supported in this api, please use other api"
.format(constraints.value))
    return res
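A hedged usage sketch, assuming the akg package and this module's names (TileConstraint, create_constraint_on_axis) are importable; it shows how an operator might build a small strategy list and hand it to auto tiling:

# Illustrative only: request a tile factor of 16 on axis 1 and forbid isolated tiles on axis 0.
factor_nodes = create_constraint_on_axis(values=16, constraints=TileConstraint.FACTOR, axis=1)
isolate_nodes = create_constraint_on_axis(values=1, constraints=TileConstraint.FORBID_ISOLATE, axis=0)
strategy = factor_nodes + isolate_nodes
# Operators typically forward such a node list to auto tiling through their attrs dict,
# e.g. attrs["custom_tiling"] = strategy (assumed attachment point).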
Python | def create_constraint_on_tensor(tensor, values, constraints, tensor_pos=None, level=TileLevel.C1):
"""api for dsl to create tiling constraints on certain tensor."""
if constraints not in TileConstraint:
raise ValueError("Tile constraint must be chosen from {0}".format(TileConstraint))
if isinstance(tensor, (list, tuple)):
for t in tensor:
if not isinstance(t, akg.tvm.tensor.Tensor):
raise TypeError("Tensor should be tvm.tensor.Tensor or a list/tuple of tvm.tensor.Tensor.")
tensor_name = [tensor.op.name] if isinstance(tensor, akg.tvm.tensor.Tensor) else [t.op.name for t in tensor]
values = [values] if isinstance(values, (str, int)) else values
if tensor_pos is None:
tensor_pos = [i for i in range(len(values))]
else:
tensor_pos = [tensor_pos] if isinstance(tensor_pos, int) else tensor_pos
if len(tensor_pos) != len(values):
raise ValueError("Length of tensor position is not compatible with length of constraint values")
strategy = list()
for t in tensor_name:
for p, v in zip(tensor_pos, values):
if constraints == TileConstraint.MIN:
strategy.append(create_custom_tiling_node(TileMode.TENSOR, tile_level=level,
tensor_name=t, tile_pos=p, tile_min=v))
elif constraints == TileConstraint.MOD:
strategy.append(create_custom_tiling_node(TileMode.TENSOR, tile_level=level,
tensor_name=t, tile_pos=p, tile_mod=v))
elif constraints == TileConstraint.FACTOR:
strategy.append(create_custom_tiling_node(TileMode.TENSOR, tile_level=level,
tensor_name=t, tile_pos=p, tile_factor=v))
elif constraints == TileConstraint.CANDIDATE:
strategy.append(create_custom_tiling_node(TileMode.TENSOR, tile_level=level,
tensor_name=t, tile_pos=p, tile_candidate=v))
elif constraints == TileConstraint.MAX:
strategy.append(create_custom_tiling_node(TileMode.TENSOR, tile_level=level,
tensor_name=t, tile_pos=p, tile_max=v))
elif constraints == TileConstraint.FORBID_ISOLATE:
strategy.append(create_custom_tiling_node(TileMode.TENSOR, tile_level=level,
tensor_name=t, tile_pos=p, forbid_isolate=v))
elif constraints == TileConstraint.SET_PRIORITY:
strategy.append(create_custom_tiling_node(TileMode.TENSOR, tile_level=level,
tensor_name=t, tile_pos=p, priority=v))
elif constraints == TileConstraint.SET_EXPANSION:
strategy.append(create_custom_tiling_node(TileMode.TENSOR, tile_level=level,
tensor_name=t, expansion=v))
else:
raise TypeError("Constraint {} is not supported in this api, please use other api"
.format(constraints.value))
    return strategy
Python | def create_template(tensor, template, level=TileLevel.C1):
"""create template according to given template arg."""
tensor_name = tensor.op.name
if template not in TileTemplate:
raise ValueError("Invalid template name {0}, must chosen from {1}".
format(template, TileTemplate))
if template in [TileTemplate.NCHW, TileTemplate.DEFAULT_FORMAT]:
return template_nchw(tensor_name, level)
if template == TileTemplate.NC1HWC0:
return template_nc1hwc0(tensor_name, level)
if template == TileTemplate.NHWC:
return template_nhwc(tensor_name, level)
    return []
Python | def to_tvm_type(value, t_type):
"""transform integer and string to corresponding type in tvm."""
if isinstance(value, int):
return akg.tvm.expr.IntImm("int32", value)
if isinstance(value, str):
return akg.tvm.expr.StringImm(value)
if isinstance(value, (akg.tvm.expr.IntImm, akg.tvm.expr.StringImm)):
return value
raise TypeError("{} only support integer or string, found {}".format(t_type, type(value))) | def to_tvm_type(value, t_type):
"""transform integer and string to corresponding type in tvm."""
if isinstance(value, int):
return akg.tvm.expr.IntImm("int32", value)
if isinstance(value, str):
return akg.tvm.expr.StringImm(value)
if isinstance(value, (akg.tvm.expr.IntImm, akg.tvm.expr.StringImm)):
return value
raise TypeError("{} only support integer or string, found {}".format(t_type, type(value))) |
Python | def create_custom_tiling_node(tile_mode,
tile_level=TileLevel.C1,
tensor_name=DEFAULT_STRING,
tile_pos=DEFAULT_VALUE,
tile_band=DEFAULT_VALUE,
tile_axis=DEFAULT_VALUE,
tile_min=DEFAULT_VALUE,
tile_max=DEFAULT_VALUE,
tile_mod=DEFAULT_VALUE,
tile_factor=DEFAULT_VALUE,
tile_candidate=DEFAULT_VALUE,
forbid_isolate=DEFAULT_VALUE,
axis_info=DEFAULT_STRING,
priority=DEFAULT_VALUE,
expansion=DEFAULT_VALUE,
mem_ratio=double(DEFAULT_VALUE),
thread_min=[],
thread_max=[],
thread_mod=[],
block_min=[],
block_max=[],
block_mod=[]):
"""default method to create custom tiling node, all values are default except tile mode."""
tile_min = to_tvm_type(tile_min, "tile_min")
tile_max = to_tvm_type(tile_max, "tile_max")
tile_mod = to_tvm_type(tile_mod, "tile_mod")
tile_factor = to_tvm_type(tile_factor, "tile_factor")
tile_candidate = to_tvm_type(tile_candidate, "tile_candidate")
return akg.tvm.make.node(NODE_TYPE,
tile_level=akg.tvm.expr.StringImm(tile_level.value),
tile_mode=akg.tvm.expr.StringImm(tile_mode.value),
tensor_name=akg.tvm.expr.StringImm(tensor_name),
tile_pos=tile_pos,
tile_band=tile_band,
tile_axis=tile_axis,
tile_min=tile_min,
tile_max=tile_max,
tile_mod=tile_mod,
tile_factor=tile_factor,
tile_candidate=tile_candidate,
forbid_isolate=forbid_isolate,
axis_info=akg.tvm.expr.StringImm(axis_info),
priority=priority,
expansion=expansion,
mem_ratio=mem_ratio,
thread_min=thread_min,
thread_max=thread_max,
thread_mod=thread_mod,
block_min=block_min,
block_max=block_max,
                             block_mod=block_mod)
Python | def template_nc1hwc0(tensor_name, level):
"""create default tiling strategy for nc1hwc0 template."""
node_n = create_custom_tiling_node(TileMode.TENSOR,
tile_level=level,
tensor_name=tensor_name,
tile_pos=0,
tile_factor=to_tvm_type(1, "tile_factor"))
node_c0 = create_custom_tiling_node(TileMode.TENSOR,
tile_level=level,
tensor_name=tensor_name,
tile_pos=4,
tile_max="FULL")
    return [node_n, node_c0]
Python | def template_nchw(tensor_name, level):
"""create default tiling strategy for nchw template."""
node_n = create_custom_tiling_node(TileMode.TENSOR,
tile_level=level,
tensor_name=tensor_name,
tile_pos=0,
tile_factor=to_tvm_type(1, "tile_factor"))
node_c = create_custom_tiling_node(TileMode.TENSOR,
tile_level=level,
tensor_name=tensor_name,
tile_pos=1,
tile_mod=to_tvm_type(CUBE_UNIT, "tile_factor"))
    return [node_n, node_c]
Python | def template_nhwc(tensor_name, level):
"""create default tiling strategy for nhwc template."""
node_n = create_custom_tiling_node(TileMode.TENSOR,
tile_level=level,
tensor_name=tensor_name,
tile_pos=0,
tile_factor=to_tvm_type(1, "tile_factor"))
node_c = create_custom_tiling_node(TileMode.TENSOR,
tile_level=level,
tensor_name=tensor_name,
tile_pos=3,
tile_mod=to_tvm_type(CUBE_UNIT, "tile_factor"))
    return [node_n, node_c]
Python | def reg_gen_key_func(gen_key_func):
"""register generated key by function."""
def decorate(func_):
@wraps(func_)
def wrapper(*args, **kwargs):
gen_key_func_map[func_.__name__] = gen_key_func
return func_(*args, **kwargs)
return wrapper
    return decorate
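The same lazy-registration pattern in a standalone, dependency-free form (all names below are illustrative); the key-maker is recorded under the wrapped function's name the first time it runs:

from functools import wraps

demo_key_map = {}   # illustrative stand-in for gen_key_func_map

def demo_reg_gen_key(gen_key_func):
    """Register gen_key_func under the decorated function's name on first call."""
    def decorate(func_):
        @wraps(func_)
        def wrapper(*args, **kwargs):
            demo_key_map[func_.__name__] = gen_key_func
            return func_(*args, **kwargs)
        return wrapper
    return decorate

@demo_reg_gen_key(lambda shape, dtype: str((tuple(shape), dtype)))
def my_op(shape, dtype):
    return shape, dtype

my_op([16, 16], "float16")                         # first call registers the key maker
print(demo_key_map["my_op"]([16, 16], "float16"))  # ((16, 16), 'float16')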
Python | def _parse_merged_json(desc_d, stitch_tensor_name, input_tensor_name, output_tensor_name):
"""
    Parse the merged json to get the sub-graphs split by stitch nodes and the input-output relationship of the merged graph.
Args:
desc_d (dict): The dict of compute description.
stitch_tensor_name (list[string]): The list of stitch node tensors.
stitch nodes are regarded as edges of sub_graphs. The smallest number of sub_graph is the length of
stitch_tensor_name + 1.
input_tensor_name (list[string]): The list of input tensors.
output_tensor_name (list[string]): The list of output tensors.
            output tensors are regarded as either inter_output_tensor or final_output_tensor. The difference
            between the two kinds is the out-degree: a final_output_tensor has zero out-degree in the merged
            graph, while an inter_output_tensor does not.
Returns:
extra_subgraph_output (dict): The dict of extra output tensors for each sub_graph.
final_output_list (list[string]): The list of final output tensors.
            output tensors in this list are final_output_tensor and the subgraph they belong to doesn't
include stitch nodes.
final_output_within_graph (list[string]): The list of final output tensors.
output tensors in this list are final_output_tensor and the subgraph they belong to also includes
stitch node.
"""
# Initialize sub_graph number as the smallest possible number of sub graph.
# sub graphs number might increase based on graph structure.
sub_graph_length = len(stitch_tensor_name)
sub_graph_node = [set() for _ in range(sub_graph_length)]
# use dict to save extra outputs for each sub_graph.
extra_subgraph_output = dict(zip(stitch_tensor_name, [[] for _ in range(sub_graph_length)]))
in_out_dict = {}
inter_output_list = set()
final_output_list = set()
final_output_within_graph = []
idx = 0
final_output_graph = False
for i in range(len(desc_d['op_desc']) - 1, -1, -1):
op_info = desc_d['op_desc'][i]
for out_desc in op_info['output_desc']:
# switch to next subgraph if find stitch node.
if out_desc['tensor_name'] in stitch_tensor_name:
idx += 1
cur_stitch_node = out_desc['tensor_name']
            # when the current subgraph contains a final output and a stitch node is encountered,
            # increase the number of subgraphs.
if final_output_graph:
final_output_list.add(cur_final_node)
final_output_within_graph.remove(cur_final_node)
sub_graph_length += 1
sub_graph_node += [set()]
final_output_graph = False
# out_desc not in in_out_dict means out-degree is zero.
if out_desc['tensor_name'] not in in_out_dict:
final_output_graph = True
cur_final_node = out_desc['tensor_name']
final_output_within_graph.append(cur_final_node)
sub_graph_node[idx].add(out_desc['tensor_name'])
for input_desc in op_info['input_desc']:
for sub_input_desc in input_desc:
sub_graph_node[idx].add(sub_input_desc['tensor_name'])
tmp_name = sub_input_desc['tensor_name']
if tmp_name in output_tensor_name:
inter_output_list.add(sub_input_desc['tensor_name'])
for subgraph in sub_graph_node[0: idx]:
extra_output = _is_tensor(sub_input_desc) and tmp_name not in stitch_tensor_name \
and tmp_name not in input_tensor_name
used_by_other_sg = tmp_name in subgraph
used_as_output = tmp_name in output_tensor_name
extra_output = extra_output and (used_by_other_sg or used_as_output)
if extra_output and cur_stitch_node and not final_output_graph:
extra_subgraph_output[cur_stitch_node].insert(0, tmp_name)
break
if sub_input_desc['tensor_name'] not in in_out_dict:
in_out_dict[sub_input_desc['tensor_name']] = [out_desc['tensor_name']]
else:
in_out_dict[sub_input_desc['tensor_name']].append(out_desc['tensor_name'])
return extra_subgraph_output, list(final_output_list), final_output_within_graph
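# Illustrative sketch (not part of the original module) of the out-degree test used above,
# shown on a hypothetical two-op graph: a tensor that never appears as an op input has zero
# out-degree and is therefore a final_output_tensor; otherwise it is an inter_output_tensor.
def _toy_final_outputs(op_desc, output_tensor_name):
    """Classify graph outputs by out-degree, mirroring the in_out_dict logic above."""
    in_out_dict = {}
    for op_info in op_desc:
        for out_desc in op_info['output_desc']:
            for input_desc in op_info['input_desc']:
                for sub_input_desc in input_desc:
                    in_out_dict.setdefault(sub_input_desc['tensor_name'], []).append(out_desc['tensor_name'])
    final_outputs = [t for t in output_tensor_name if t not in in_out_dict]
    inter_outputs = [t for t in output_tensor_name if t in in_out_dict]
    return final_outputs, inter_outputs
# toy graph: t0 = op0(a); t1 = op1(t0); both t0 and t1 are marked as graph outputs
_toy_ops = [
    {'output_desc': [{'tensor_name': 't0'}], 'input_desc': [[{'tensor_name': 'a'}]]},
    {'output_desc': [{'tensor_name': 't1'}], 'input_desc': [[{'tensor_name': 't0'}]]},
]
assert _toy_final_outputs(_toy_ops, ['t0', 't1']) == (['t1'], ['t0'])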
def stitch_json_split(desc_d):
"""
split sub graph from merged json file.
Using 'buffer_stitch' to store stitch info from graph kernel.
Args:
desc_d: dict of compute description
Returns:
List of split json strings, one per sub graph.
Lists of input tensor names and output tensor names (fake outputs appended to the outputs).
Dicts of shared-memory allocation, reuse and clean-op info.
"""
stitch_jsons = []
input_tensor_name = [tensor[0]['tensor_name'] for tensor in desc_d['input_desc']]
output_tensor_name = [tensor['tensor_name'] for tensor in desc_d['output_desc']]
stitch_node = desc_d['buffer_stitch']['stitch_op']
stitch_node_name = [node for stitchnode in stitch_node for node in stitchnode]
extra_subgraph_output, final_output_list, final_output_within_graph = \
_parse_merged_json(desc_d, stitch_node_name, input_tensor_name, output_tensor_name)
# traverse extra_subgraph_output to save extra output into subgraph.
stitch_node = []
extra_list = []
for item in extra_subgraph_output:
cur_list = [item]
for node in extra_subgraph_output[item]:
if node not in extra_list:
extra_list.append(node)
cur_list.append(node)
stitch_node.append(cur_list)
stitch_node_name = [node for stitchnode in stitch_node for node in stitchnode]
# initialize req_map
req_op_size = [0] * len(stitch_node_name)
req_map = dict(zip(stitch_node_name, req_op_size))
# add final output within subgraph into the last initialized stitch sub_graph.
stitch_node = stitch_node[:-1] + [stitch_node[-1] + final_output_within_graph]
# add final output into stitch_op.
stitch_node += [[op] for op in final_output_list if op not in stitch_node_name]
stitchnode_list = [node for stitchnode in stitch_node for node in stitchnode]
# each output tensor can only be parsed as output once in all subgraphs.
# All tensors in stitch_node_list will be put into output_name.
# Save other output tensors which are not in stitch_node_name for the output collection of subgraphs.
complement_output = [tensor for tensor in output_tensor_name if tensor not in stitchnode_list]
# initialize sub_stitch_graphs.
sub_stitch_graphs = []
for i, stitch_op in enumerate(stitch_node):
sub_stitch_graphs.append(Graph(stitch_op))
sub_stitch_graphs, inplace_assign_map, fake_output_list = \
_collect_subgraph_info(desc_d, sub_stitch_graphs, req_map,
input_tensor_name, complement_output, stitchnode_list)
# reverse op order to generate topological subgraph
for i, sg in enumerate(sub_stitch_graphs):
sg.ops = list(reversed(sg.ops))
sg.op_name = desc_d['op']
stitch_json_str = _sub_graph_info(sg, desc_d)
if os.getenv(get_dump_ir_flag()) == "on":
if not os.path.exists("stitch_info"):
try:
os.mkdir("stitch_info")
except OSError as err:
# 17, OSError: [Errno 17] File exists
if err.errno == 17:
pass
else:
raise err
with open('stitch_info/' + sg.op_name + '_stitch_' + str(i + 1) + '.json', 'w+') as f:
f.write(stitch_json_str)
with open('stitch_info/' + sg.op_name + '_stitch.json', 'w+') as f:
f.write(json.dumps(desc_d))
stitch_jsons.append(stitch_json_str)
clean_op_list = [fake_op for fake_op in fake_output_list if fake_op in stitch_node_name]
# add fake outputs into output_tensor_name
output_tensor_name += clean_op_list
# start node for dominance tree is final_output_list + final_output_within_graph.
start_node = final_output_list + final_output_within_graph
alloc_map, reuse_map = _shared_memory_optimization(desc_d, req_map, output_tensor_name)
# remove fake output from alloc_map and store them into clean_op_map
clean_op_map = dict()
for fake_op in clean_op_list:
if fake_op in alloc_map:
clean_info = alloc_map[fake_op]
alloc_map.pop(fake_op)
else:
clean_info = reuse_map[fake_op]
reuse_map.pop(fake_op)
clean_op_map[inplace_assign_map[fake_op]] = clean_info
if not alloc_map:
alloc_map['EMPTY'] = []
if not clean_op_map:
clean_op_map['EMPTY'] = []
if not reuse_map:
reuse_map['EMPTY'] = []
return stitch_jsons, input_tensor_name, output_tensor_name, alloc_map, reuse_map, clean_op_map
def ApproximateEqual(x, y, tolerance=1e-5, target=utils.CCE):
"""
abs(x-y) less than or equal to the tolerance
Args:
x (tvm.tensor.Tensor): Tensor of type float16, float32.
y (tvm.tensor.Tensor): Tensor of type float16, float32.
tolerance (float): default is 1e-5
Returns:
tvm.tensor.Tensor. If abs(x-y) less than or equal to the tolerance return True,
else return False.
Supported Platforms:
'Ascend'
"""
if tolerance < 0:
raise RuntimeError("tolerance should be >= 0")
# check shape
utils.check_shape(x)
utils.check_shape(y)
shape = get_shape(x)
if shape != get_shape(y):
raise RuntimeError("input shape must be same, but got %s vs %s",
shape, get_shape(y))
# check input tensor data_type
utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)
utils.ops_dtype_check(y.dtype, utils.DtypeForDavinci.ALL_FLOAT)
dtype = x.dtype
if dtype != y.dtype:
raise RuntimeError("input type must be same, but got %s vs %s",
dtype, y.dtype)
res_vsub = Sub(x, y, target)
res_vabs = Abs(res_vsub, target)
# As the vcmp_lt and vsel instructions don't support fp32 on mini,
# cast to fp16 first; this could be simplified by methods such as "auto cast"
if product_is_mini():
dtype = "float16"
res_vabs = Cast(res_vabs, dtype, target)
t = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(1, dtype), "t")
f = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(0, dtype), "f")
res = akg.tvm.compute(shape, lambda *indice: akg.tvm.expr.Select(
res_vabs[indice] <= akg.tvm.const(tolerance, dtype),
t[indice], f[indice]))
# This could be simplified by letting the cast op support fp16/fp32 to bool directly
res_fp16 = Cast(res, "float16", target)
res_bool = akg.tvm.compute(shape, lambda *indice: res_fp16(*indice).astype("bool"))
return res_bool
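# NumPy sketch (not the Ascend kernel) of the semantics implemented above:
# elementwise |x - y| <= tolerance, returned as a boolean array.
import numpy as np
def approximate_equal_np(x, y, tolerance=1e-5):
    """Reference model of ApproximateEqual."""
    return np.abs(x - y) <= tolerance
x_np = np.array([1.0, 2.0, 3.0], dtype=np.float32)
y_np = np.array([1.0, 2.0001, 4.0], dtype=np.float32)
print(approximate_equal_np(x_np, y_np))        # [ True False False] with the default tolerance
print(approximate_equal_np(x_np, y_np, 1e-3))  # [ True  True False]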
def sum_data(data, axes, keepdims):
"""sum one axis data at a time"""
for x in axes:
data = akg.topi.sum(data, axis=x, keepdims=keepdims)
return data
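# Quick NumPy check (illustrative) that reducing one axis at a time, as sum_data does with
# keepdims=True, matches a single multi-axis reduction. Note that with keepdims=False the
# remaining axis indices would shift after each reduction.
import numpy as np
a = np.arange(2 * 3 * 4 * 5, dtype=np.float32).reshape(2, 3, 4, 5)
step = a.sum(axis=2, keepdims=True).sum(axis=3, keepdims=True)
direct = a.sum(axis=(2, 3), keepdims=True)
assert np.allclose(step, direct)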
def apply_adadelta_run(shape, dtype, epsilon, attrs=None):
"""run function for dsl function apply_adadelta."""
shapes = [shape, shape, shape, shape, (1,), (1,)]
dtypes = [dtype] * len(shapes)
op_attrs = [epsilon]
mod = utils.op_build_test(apply_adadelta, shapes, dtypes,
op_attrs=op_attrs, kernel_name="apply_adadelta", attrs=attrs)
inputs, expects, args = gen_data(shape, dtype, epsilon)
outputs = utils.mod_launch(mod, args, outputs=(0, 1, 2), expect=expects)
rtol, atol = get_rtol_atol("apply_delta", dtype)
results = list(map(lambda x, y: compare_tensor(x, y, rtol=rtol, atol=atol), outputs, expects))
return inputs, outputs, expects, all(results)
def gen_data(shape, dtype, epsilon):
"""Generate data for testing the op."""
var = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
accum = np.abs(random_gaussian(shape, miu=1, sigma=0.3).astype(dtype))
accum_update = np.abs(random_gaussian(shape, miu=1, sigma=0.3).astype(dtype))
grad = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
lr = np.random.rand(1).astype(dtype)
rho = np.random.rand(1).astype(dtype)
inputs = [var, accum, accum_update, grad, lr, rho]
one = np.array([1]).astype(dtype)
epsilon = np.array([epsilon]).astype(dtype)
out_accum = rho * accum + (one - rho) * grad * grad
update = np.sqrt(accum_update + epsilon) / np.sqrt(out_accum + epsilon) * grad
out_accum_update = rho * accum_update + (one - rho) * update * update
out_var = var - update * lr
expects = [out_var, out_accum, out_accum_update]
args = inputs
return inputs, expects, args
def Add(data1, data2, scale=1.0, polyhedral=True, attrs={}, target=utils.CCE):
"""
Computes data1 + data2 elementwise, broadcast is supported.
Args:
data1 (tvm.tensor.Tensor): Tensor.
data2 (tvm.tensor.Tensor): Tensor of same type as data1, if shape(data2) != shape(data1), broadcast will happen.
Returns:
tvm.tensor.Tensor, add result, with same type as input tensors and broadcasted shape of data1 and data2.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
if target == utils.CCE:
return _add_ascend(data1, data2, scale, polyhedral, attrs)
else:
return _add(data1, data2)
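# Hedged usage sketch for Add above; the placeholder shapes and dtype are made up and the
# snippet assumes the module's akg/utils imports are available.
lhs = akg.tvm.placeholder((16, 16), name="lhs", dtype="float16")
rhs = akg.tvm.placeholder((16,), name="rhs", dtype="float16")
out = Add(lhs, rhs)  # rhs is broadcast against lhs; the default target is utils.CCE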
def cce(options=None):
"""Returns a cce target.
Parameters
----------
options : list of str
Additional options
"""
options = options if options else []
return _api_internal._TargetCreate("cce", *options)
def opengl(model='unknown', options=None):
"""Returns an OpenGL target.
Parameters
----------
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
return _api_internal._TargetCreate("opengl", *opts)
def bifrost(model='unknown', options=None):
"""Return an ARM Mali GPU target (Bifrost architecture).
Parameters
----------
options : str or list of str
Additional options
"""
opts = ["-device=bifrost", '-model=%s' % model]
opts = _merge_opts(opts, options)
return _api_internal._TargetCreate("opencl", *opts)
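# Illustrative calls to the target constructors above; the option list passed to bifrost is a
# hypothetical example, not an exhaustive set of supported flags.
cce_target = cce()
gl_target = opengl(model="unknown")
mali_target = bifrost(options=["-max_num_threads=256"])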
def broadcast(var, shape, output_dtype=None):
"""
broadcast a scalar or tensor to the given shape; python scalars default to float16 (float) or int32 (int)
Args:
var (Union[int, float, tvm.const, tvm.tensor.Tensor]): input scalar or tensor
shape (Union[list, tuple, tvm.container.Array]): target shape
output_dtype (str): dtype of the output, defaults to the dtype of var
Returns:
tvm.tensor.Tensor, broadcast tensor
"""
if isinstance(shape, akg.tvm.container.Array):
shape = shape_to_list(shape)
if isinstance(var, akg.tvm.tensor.Tensor):
tensor = var
orig_shape = shape_to_list(tensor.shape)
if len(orig_shape) > len(shape):
raise RuntimeError(
"Length of shape of input must be less than or equal to output for Tensor Broadcasting, while " +
"input shape is %s, and output shape is %s" % (str(orig_shape), str(shape)))
expand_shape_len = len(shape) - len(orig_shape)
check_equal = 0
for so, sd in zip(orig_shape, shape[expand_shape_len:]):
if so == sd:
check_equal += 1
continue
elif so == 1:
continue
raise RuntimeError(
"For tensor broadcasting, shape must be the same or corresponding shape of src tensor is 1, "
"while src shape is %s, and dst shape is %s" % (str(orig_shape), str(shape)))
if check_equal == len(shape):
return tensor
name = "broadcast_tensor_" + str(_name_index[0])
_name_index[0] += 1
op = 'broadcast_for_tensor'
lambda_func = lambda *indice: tensor(*([0 if orig_shape[i] == 1
else indice[i + expand_shape_len] for i in range(len(orig_shape))]))
with akg.tvm.tag_scope(op):
out = akg.tvm.compute(shape, lambda_func, name=name)
return out
var_type = judge_var(var)
tmp_args = var
if var_type == "python_const":
if isinstance(tmp_args, float):
tmp_args = akg.tvm.const(tmp_args, dtype="float16")
else:
tmp_args = akg.tvm.const(tmp_args, dtype="int32")
if not output_dtype:
output_dtype = tmp_args.dtype
tmp_args = tmp_args.astype(output_dtype)
lambda_func = lambda *indice: tmp_args
name = "broadcast_" + str(_name_index[0])
_name_index[0] += 1
op = 'broadcast'
with akg.tvm.tag_scope(op):
out = akg.tvm.compute(shape, lambda_func, name=name)
return out
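# NumPy analogy (sketch) for the shape rule enforced above: trailing dimensions must either
# match or be 1 in the source shape, and leading dimensions may be prepended.
import numpy as np
src = np.ones((1, 16), dtype=np.float16)
dst_shape = (4, 8, 16)
assert np.broadcast_to(src, dst_shape).shape == dst_shape   # (1, 16) -> (4, 8, 16) is legal
# np.broadcast_to(np.ones((3, 16)), dst_shape) would raise, just as broadcast() raises above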
def maxpool_ad_no_custom_diff_poly_all_max(head, data, kernel, stride, pad):
"""automatic differentiate of maxpool with polyhedral"""
attrs = {"enable_post_poly_loop_partition": False, "enable_pre_poly_loop_partition": False}
maxpool_fwd = OldMaxPool(data, kernel, stride, pad)
[dl_ddata] = akg.differentiate(maxpool_fwd, [data], head, None, None)
return dl_ddata, attrs
def maxpool_ad_no_custom_diff_manual_schedule_all_max(head, data, kernel, stride, pad):
"""automatic differentiate of maxpool with manual schedule."""
attrs = {"enable_post_poly_loop_partition": False, "enable_pre_poly_loop_partition": False}
maxpool_fwd = OldMaxPool(data, kernel, stride, pad)
[dl_ddata] = akg.differentiate(maxpool_fwd, [data], head, None, None)
# schedule for differentiation operation
s = akg.tvm.create_schedule([dl_ddata.op])
new_tensor_red = dl_ddata
new_tensor = new_tensor_red.op.input_tensors[0]
data = new_tensor.op.input_tensors[0]
broadcast = new_tensor.op.input_tensors[1]
head = new_tensor.op.input_tensors[2]
forward = broadcast.op.input_tensors[0]
def comp_func(s):
data_ub = s.cache_read(data, "local.UB", [forward, new_tensor])
head_ub = s.cache_read(head, "local.UB", [new_tensor])
result_ub = s.cache_write(new_tensor_red, "local.UB")
s[broadcast].set_scope("local.UB")
s[forward].set_scope("local.UB")
b, c1, h, w, c0 = forward.op.axis
oh, ow = forward.op.reduce_axis
s[forward].reorder(oh, ow, b, c1, h, w, c0)
s[new_tensor].set_scope("local.UB")
b, c1, h, w, c0 = result_ub.op.axis
s[result_ub].reorder(*result_ub.op.reduce_axis, b, c1, h, w, c0)
s[broadcast].compute_at(s[result_ub], b)
s[new_tensor].compute_at(s[result_ub], b)
return dl_ddata, comp_func, attrs
def maxpool_ad_manual_schedule_no_overlap_all_max(shape, kernel, stride, pad, dtype, attrs=None, polyhedral=False):
"""automatic differentiate of maxpool with manual schedule for no overlap case."""
kernel_h, kernel_w = kernel
stride_h, stride_w = stride
pad_h, pad_w, _, _ = pad
batch_size, input_c1, input_h, input_w, input_c0 = shape
pad_shape = (batch_size, input_c1, input_h + 2 * pad_h, input_w + 2 * pad_w, input_c0)
def custom_maxpool_fdiff(out, inputs, head_, ad_attrs, new_pld_array):
in_data = inputs[0]
if stride_w != kernel_w:
raise RuntimeError("Only supports kernels with same dimensions as stride size!")
if stride_h != kernel_h:
raise RuntimeError("Only supports kernels with same dimensions as stride size!")
# copy output to the shape of the padded input, copying the same value for the entire kernel size
out_broadcast = akg.tvm.compute(pad_shape,
lambda b, c1, h, w, c0:
out(b, c1, akg.tvm.floordiv(h, stride_h), akg.tvm.floordiv(w, stride_w), c0),
name="out_broadcast")
# copy head to the shape of the padded input, copying the same value for the entire kernel size
head_broadcast = akg.tvm.compute(pad_shape,
lambda b, c1, h, w, c0:
head_(b, c1, akg.tvm.floordiv(h, stride_h), akg.tvm.floordiv(w, stride_w), c0),
name="head_broadcast")
# check if value was a maximum and assign head of that position if it was
# this is done for all the maximum values within one kernel
result = akg.tvm.compute(in_data.shape,
lambda b, c1, h, w, c0:
akg.tvm.expr.Select(
in_data(b, c1, h, w, c0) == out_broadcast(b, c1, h + pad_h, w + pad_w, c0),
head_broadcast(b, c1, h + pad_h, w + pad_w, c0),
akg.tvm.const(0, dtype=in_data.dtype)),
name="result")
return [result]
out_size_h = (input_h + 2 * pad_h - kernel_h) // stride_h + 1
out_size_w = (input_w + 2 * pad_w - kernel_w) // stride_w + 1
out_shape = (batch_size, input_c1, out_size_h, out_size_w, input_c0)
# tensor for the input data
data = akg.tvm.placeholder(shape, dtype, name="input_data")
# maxpool output
forward = akg.tvm.placeholder(out_shape, name="forward", dtype=dtype)
# adjoint tensor for the differentiation
head = akg.tvm.placeholder(out_shape, name="head", dtype=dtype)
# override differentiation computation with custom function
[dl_ddata] = akg.differentiate(forward, [data], head, None, None,
override={forward: ([data], custom_maxpool_fdiff)})
# schedule for differentiation operation
s = akg.tvm.create_schedule([dl_ddata.op])
# get computations
result = dl_ddata
forward_broadcast = result.op.input_tensors[1]
head_broadcast = result.op.input_tensors[2]
# cache reads and writes
result_ub = s.cache_write(result, "local.UB")
data_ub = s.cache_read(data, "local.UB", [result_ub])
head_ub = s.cache_read(head, "local.UB", [head_broadcast])
forward_ub = s.cache_read(forward, "local.UB", [forward_broadcast])
s[head_broadcast].set_scope("local.UB")
s[forward_broadcast].set_scope("local.UB")
s[head_ub].compute_at(s[head_broadcast], head_broadcast.op.axis[0])
s[forward_ub].compute_at(s[forward_broadcast], forward_broadcast.op.axis[0])
s[data_ub].compute_at(s[result_ub], result_ub.op.axis[0])
s[forward_broadcast].compute_at(s[result_ub], result_ub.op.axis[0])
s[head_broadcast].compute_at(s[result_ub], result_ub.op.axis[0])
_, c1, h, _, _ = result.op.axis
if input_h + 2 * pad_h > 32 or input_w + 2 * pad_w > 32:
h_outer, _ = s[result].split(h, 4)
s[result_ub].compute_at(s[result], h_outer)
else:
s[result_ub].compute_at(s[result], c1)
with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
mod = akg.build(s, [head, data, forward, dl_ddata], "cce",
name="maxpool_ad_manual_schedule_no_overlap_all_max", attrs=attrs, polyhedral=polyhedral)
source_code = mod.imported_modules[0].get_source()
kernel_name = "maxpool_ad_manual_schedule_no_overlap_all_max"
create_code(kernel_name, './', source_code)
return mod
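# The output spatial size used above, written as a tiny standalone helper (illustrative):
# out = (in + 2 * pad - kernel) // stride + 1 along each spatial axis.
def pooled_size(in_size, kernel, stride, pad):
    """Output length of a pooling window along one axis."""
    return (in_size + 2 * pad - kernel) // stride + 1
assert pooled_size(16, 4, 4, 0) == 4   # a 16-wide axis with a 4-wide kernel, stride 4, no padding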
def UnsortedSegmentSum(data, indices, num, op_id=0, target=utils.CUDA):
"""
Computes the sum value along ids_tensor of a akg.tvm.Tensor
Args:
data (tvm.tensor.Tensor): Tensor of type float16, float32, int32
indices (tvm.tensor.Tensor): Tensor of type int32, shape is a prefix of data.shape.
num (int): the number of segments (classes) in indices
Returns:
tvm.tensor.Tensor of same type as input_data,
Raises:
RuntimeError: If the type of input_data is invalid.
Supported Platforms:
'Ascend', 'GPU'
"""
if target == utils.CCE:
return unsorted_segment_sum_ascend(data, indices, num)
return tensor_unsorted_segment_sum((data, indices), {'num_segments': num, 'op_id': op_id})
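# NumPy reference (sketch) for the semantics of UnsortedSegmentSum above: row i of data is
# accumulated into output row indices[i].
import numpy as np
def unsorted_segment_sum_np(data, indices, num):
    out = np.zeros((num,) + data.shape[1:], dtype=data.dtype)
    for i, seg in enumerate(indices):
        out[seg] += data[i]
    return out
data_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32)
idx_np = np.array([0, 2, 0], dtype=np.int32)
print(unsorted_segment_sum_np(data_np, idx_np, num=3))   # segment sums: [[6, 8], [0, 0], [3, 4]]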
def pack(x, axis):
"""
Concatenates tensors along one dimension.
Args:
x (Union[tuple, list]): Input tensors. Support int8, uint8,
int16, uint16, int32, uint32, int64, uint64,
float16, float32.
axis (int): in the range [-rank(x), rank(x))
Returns:
tvm.tensor.Tensor
"""
for _, tensor in enumerate(x):
shape_tensor = get_shape(tensor)
utils.check_shape(shape_tensor)
utils.ops_dtype_check(
tensor.dtype, [
utils.DtypeForDavinci.BOOL,
utils.DtypeForDavinci.INT8, utils.DtypeForDavinci.INT16,
utils.DtypeForDavinci.INT32, utils.DtypeForDavinci.INT64,
utils.DtypeForDavinci.UINT8, utils.DtypeForDavinci.UINT16,
utils.DtypeForDavinci.UINT32, utils.DtypeForDavinci.UINT64,
utils.DtypeForDavinci.FLOAT16, utils.DtypeForDavinci.FLOAT32
])
if (axis < -len(get_shape(x[0])) - 1) or (axis > len(get_shape(x[0]))):
raise RuntimeError(
"pack axis must be in [-%d , %d), "
"actual is %d" % (
len(get_shape(x[0])) + 1, len(get_shape(x[0])) + 1, axis))
if axis == -1 or axis == len(get_shape(x[0])):
raise RuntimeError("pack does not support the last dimension")
if axis < -1:
axis = axis + 1
return topi.concatenate(tuple(x), axis=axis)
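# NumPy sketch of what pack ultimately performs (a concatenation along `axis`); recall that
# pack above rejects the last dimension and shifts axes below -1 up by one.
import numpy as np
a = np.zeros((2, 3, 4))
b = np.ones((2, 5, 4))
assert np.concatenate((a, b), axis=1).shape == (2, 8, 4)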
def Reciprocal(data, high_precision=True, target=utils.CCE):
"""
Computes the reciprocal of data element-wise.
Args:
data (tvm.tensor.Tensor): input tensor of type float16, float32.
high_precision (bool): a bool value, whether to use high-precision version.
Returns:
tvm.tensor.Tensor of same type and shape as data.
Supported Platforms:
'Ascend', 'GPU'
"""
utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.ALL_FLOAT)
shape = [x.value for x in data.shape]
utils.check_shape(shape)
res = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(1, data.dtype) / (data(*indice)), name="res")
# When product is mini, use the Newton iteration method to achieve higher precision.
if product_is_mini() and high_precision:
steps = 1
for _ in range(steps):
temp1 = data * res
temp2 = temp1 * akg.tvm.const(-1, data.dtype)
temp3 = temp2 + akg.tvm.const(2, data.dtype)
res = temp3 * res
return res
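# NumPy sketch of the Newton refinement used above on the mini product:
# r' = r * (2 - x * r) drives r towards 1 / x.
import numpy as np
x = np.array([0.5, 2.0, 3.0], dtype=np.float32)
r = (1.0 / x).astype(np.float16).astype(np.float32)   # a low-precision initial reciprocal
err_before = np.max(np.abs(r - 1.0 / x))
r = r * (2.0 - x * r)                                 # one Newton step, as in the loop above
err_after = np.max(np.abs(r - 1.0 / x))
assert err_after <= err_before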
def load_data(path, printout=True):
"""
This function loads the time series data.
Parameters
----------
path : str
The path to the csv file.
Returns
-------
df : pandas.core.frame.DataFrame
The DataFrame containing the data.
"""
df = pd.read_csv(path).rename(columns={'Unnamed: 0': 'Date'})
df.Date = pd.to_datetime(df.Date)
df = df.set_index('Date')
df = df.set_index(df.index.to_period())
if printout:
print('Loaded: %d samples x %d features' % df.shape)
return df
def compute_error(trues, predicted):
"""
This function takes as input the true and predicted series,
then computes and returns the Mean Absolute Error, the Relative Absolute Error,
the Root Mean Square Error and the R^2 coefficient (clipped at zero).
"""
#corr = np.corrcoef(predicted, trues)[0,1]
mae = np.mean(np.abs(predicted - trues))
rae = np.sum(np.abs(predicted - trues)) / np.sum(np.abs(trues - np.mean(trues)))
rmse = np.sqrt(np.mean((predicted - trues)**2))
r2 = max(0, 1 - np.sum((trues-predicted)**2) / np.sum((trues - np.mean(trues))**2))
return mae, rae, rmse, r2
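# Tiny worked example (illustrative) of the metrics above; the series values are made up.
import numpy as np
trues = np.array([1.0, 2.0, 3.0, 4.0])
predicted = np.array([1.1, 1.9, 3.2, 3.8])
mae, rae, rmse, r2 = compute_error(trues, predicted)
print('MAE=%.3f RAE=%.3f RMSE=%.3f R2=%.3f' % (mae, rae, rmse, r2))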
def gen_embeddings(n_words, word2index, emb_dim=300, emb_file='glove.6B.300d.txt'):
"""
Generate an initial embedding matrix for `word_dict`.
If an embedding file is not given or a word is not in the embedding file,
a randomly initialized vector will be used.
"""
embeddings = np.random.randn(n_words, emb_dim) * 0.01
print('Embeddings: %d x %d' % (n_words, emb_dim))
if emb_file is not None:
print('Loading embedding file: %s' % emb_file)
pre_trained = 0
for line in open(emb_file).readlines():
sp = line.split()
if(len(sp) == emb_dim + 1):
if sp[0] in word2index:
pre_trained += 1
embeddings[word2index[sp[0]]] = [float(x) for x in sp[1:]]
else:
print(sp[0])
print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / n_words))
return embeddings
def emotion_intensity(NRC, word):
'''
Function to calculate emotion intensity (Eq. 1 in our paper)
:param NRC: NRC_VAD vectors
:param word: query word
:return:
'''
v, a, d = NRC[word]
a = a/2
return (np.linalg.norm(np.array([v, a]) - np.array([0.5, 0])) - 0.06467)/0.607468
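# Standalone sketch of Eq. 1 above: scale the distance of (valence, arousal / 2) from (0.5, 0).
# The VAD triple below is made up for illustration; real values come from the NRC_VAD lexicon.
import numpy as np
def emotion_intensity_np(vad):
    v, a, _d = vad
    a = a / 2
    return (np.linalg.norm(np.array([v, a]) - np.array([0.5, 0.0])) - 0.06467) / 0.607468
print(emotion_intensity_np((0.9, 0.8, 0.5)))   # a high-valence, high-arousal word scores high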
def advance(self, word_prob):
"Update beam status and check if finished or not."
num_words = word_prob.size(1)
# Sum the previous scores.
if len(self.prev_ks) > 0:
beam_lk = word_prob + self.scores.unsqueeze(1).expand_as(word_prob)
else:
beam_lk = word_prob[0]
flat_beam_lk = beam_lk.view(-1)
best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, True) # 1st sort
best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, True) # 2nd sort
self.all_scores.append(self.scores)
self.scores = best_scores
# bestScoresId is flattened as a (beam x word) array,
# so we need to calculate which word and beam each score came from
prev_k = best_scores_id // num_words
self.prev_ks.append(prev_k)
self.next_ys.append(best_scores_id - prev_k * num_words)
# End condition is when top-of-beam is EOS.
if self.next_ys[-1][0].item() == self.args.EOS_idx:
self._done = True
self.all_scores.append(self.scores)
return self._done
def beam_decode_step(inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm,
enc_batch_extend_vocab, extra_zeros, mask_src, encoder_db, mask_transformer_db,
DB_ext_vocab_batch):
''' Decode and update beam status, and then return active beam idx '''
def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
return dec_partial_seq
def prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm):
dec_partial_pos = torch.arange(1, len_dec_seq + 1, dtype=torch.long, device=self.device)
dec_partial_pos = dec_partial_pos.unsqueeze(0).repeat(n_active_inst * n_bm, 1)
return dec_partial_pos
def predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm, enc_batch_extend_vocab,
extra_zeros, mask_src, encoder_db, mask_transformer_db, DB_ext_vocab_batch):
## masking
mask_trg = dec_seq.data.eq(self.args.PAD_idx).unsqueeze(1)
mask_src = torch.cat([mask_src[0].unsqueeze(0)] * mask_trg.size(0), 0)
dec_output, attn_dist = self.model.decoder(self.model.embedding(dec_seq), enc_output,
(mask_src, mask_trg))
db_dist = None
prob = self.model.generator(dec_output, attn_dist, enc_batch_extend_vocab, extra_zeros, 1, True,
attn_dist_db=db_dist)
# prob = F.log_softmax(prob,dim=-1) #fix the name later
word_prob = prob[:, -1]
word_prob = word_prob.view(n_active_inst, n_bm, -1)
return word_prob
def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map):
active_inst_idx_list = []
for inst_idx, inst_position in inst_idx_to_position_map.items():
is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
if not is_inst_complete:
active_inst_idx_list += [inst_idx]
return active_inst_idx_list
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
dec_pos = prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm)
word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm, enc_batch_extend_vocab,
extra_zeros, mask_src, encoder_db, mask_transformer_db, DB_ext_vocab_batch)
# Update the beam with predicted word prob information and collect incomplete instances
active_inst_idx_list = collect_active_inst_idx_list(inst_dec_beams, word_prob, inst_idx_to_position_map)
return active_inst_idx_list
def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
6-tuple with the BLEU score, the n-gram precisions, the brevity penalty,
the length ratio, the translation length and the reference length.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for order in range(1, max_order + 1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order - 1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / (ratio + 1e-16))
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
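# Small usage example (illustrative) for compute_bleu above; it assumes the module's imports
# and its _get_ngrams helper are available. Each translation is scored against a list of references.
references = [[['the', 'cat', 'sat', 'on', 'the', 'mat']]]
translations = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(references, translations)
print(bleu)   # ~1.0 for an exact match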
def gen_embeddings(args, n_words, word2index):
"""
Generate an initial embedding matrix for `word_dict`.
If an embedding file is not given or a word is not in the embedding file,
a randomly initialized vector will be used.
"""
embeddings = np.random.randn(n_words, args.emb_dim) * 0.01
print('Embeddings: %d x %d' % (n_words, args.emb_dim))
if args.emb_file is not None:
print('Loading embedding file: %s' % args.emb_file)
pre_trained = 0
for line in open(args.emb_file).readlines():
sp = line.split()
if(len(sp) == args.emb_dim + 1):
if sp[0] in word2index:
pre_trained += 1
embeddings[word2index[sp[0]]] = [float(x) for x in sp[1:]]
else:
print(sp[0])
print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / n_words))
return embeddings |
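A hypothetical call; the argument object, the toy vocabulary, and the whitespace-separated "word v1 v2 ..." embedding-file format are assumptions for illustration, not taken from the source:

from argparse import Namespace

args = Namespace(emb_dim=50, emb_file=None)   # emb_file=None -> random initialization only
word2index = {"UNK": 0, "hello": 1, "world": 2}
emb = gen_embeddings(args, n_words=len(word2index), word2index=word2index)
print(emb.shape)  # (3, 50)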
def _lint(self):
"""Run linter in a subprocess."""
command = self._get_command()
process = subprocess.run(command, stdout=subprocess.PIPE)
LOG.info('Finished %s', ' '.join(command))
return process.stdout.decode('utf-8') |
def _get_command(self):
"""Return command with options and targets, ready for execution."""
targets = ' '.join(self.targets)
cmd_str = self._linter.command_with_options + ' ' + targets
cmd_shlex = shlex.split(cmd_str)
return list(cmd_shlex) |
def lint(self, targets):
"""Run linters in parallel and sort all results.
Args:
targets (list): List of files and folders to lint.
"""
LinterRunner.targets = targets
linters = self._config.get_linter_classes()
with Pool() as pool:
linters_results = pool.map(LinterRunner.run, linters)
return sorted(chain.from_iterable(linters_results)) |
def run_from_cli(self, args):
"""Read arguments, run and print results.
Args:
args (dict): Arguments parsed by docopt.
"""
if args['--dump-config']:
self._config.print_config()
else:
results = self.lint(args['<path>'])
self.print_results(results) |
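A sketch of the docopt-style argument dict this method expects; the owning instance name and the paths are purely illustrative:

args = {'--dump-config': False, '<path>': ['src/', 'tests/']}   # shape assumed from the docstring
linter_app.run_from_cli(args)   # hypothetical instance exposing run_from_cli()/lint()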
def print_results(results):
"""Print linter results and exits with an error if there's any."""
if results:
for result in results:
print(result)
issue = 'issues' if len(results) > 1 else 'issue'
sys.exit('\n:( {} {} found.'.format(len(results), issue))
else:
print(':) No issues found.') |
def sleep(seconds: int = 0):
"""The Python sleep () time method suspends execution for the
given number of seconds. The argument can be a floating point
number to indicate a more accurate sleep time.
:param seconds: Total of seconds that the program will be sleeping
"""
time.sleep(seconds) |
def cors(
methods: str = METHODS,
has_credentials: bool = HAS_CREDENTIALS,
origin: str = ORIGIN,
headers: str = HEADERS,
expose_headers: str = EXPOSE_HEADERS
):
"""CORS is an abbreviation that stands for 'Cross-Origin Resource
Sharing' and, as the name implies, allows sharing of resources
from a variety of sources.
A simple cross-origin request would be when domain1.com would be
accessing a resource from domain2.com (the resource is an image,
a CSS file, or something else). This has some massive security
implications, of course, and the built-in behavior for browsers
is that they would restrict the cross-origin HTTP request.
:param methods: The Access-Control-Allow-Methods response header indicates
which HTTP methods are allowed on a particular endpoint for cross-origin requests.
:param has_credentials: Access-Control-Allow-Credentials response header tells
the browser that the server allows
credentials for a cross-origin request.
:param origin: The Access-Control-Allow-Origin response header indicates
whether the resources in the response can be shared with the given origin.
:param headers: The Access-Control-Allow-Headers response header is used
in response to a preflight request that includes the Access-Control-Request-Headers
to indicate which HTTP headers can be used during the actual request
:param expose_headers: The Access-Control-Expose-Headers response header indicates
    which headers can be exposed as part of the response by listing their names.
``from retic import Router``
``router = Router()``
``router.use(cors())``
"""
def set_middleware(req, res, next):
"""Set all cors headers in a response to Client.
        :param req: Request is used to describe a request to a server.
:param res: Represents a response from a web request.
:param next: It must call next() to pass control to the next middleware function
"""
        res.set_headers('Access-Control-Allow-Methods', methods)
        res.set_headers('Access-Control-Allow-Credentials', has_credentials)
res.set_headers('Access-Control-Allow-Origin', origin)
res.set_headers('Access-Control-Allow-Headers', headers)
res.set_headers('Access-Control-Expose-Headers', expose_headers)
next()
return set_middleware |
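A usage sketch based on the docstring's own example; the comma-separated header value strings are an assumption:

router = Router()
router.use(cors(
    origin="https://app.example.com",
    methods="GET, POST, OPTIONS",
    headers="Content-Type, Authorization",
))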
def handle_request(self, req, res, next): # dispatch
"""Return a handle request for specific route"""
try:
return self.handle(req, res, next)
except Exception as e:
get_file_error_exception(3)
raise RuntimeError(
"error: Some controller has a problem, check it please. {0}".format(
str(e)
)
)
) |
def match(self, path: str): # self
"""Search to specific layer for a path
:param path: Route to searching in the layer
:return: Specific layer that was matched with the path"""
_match = self.regexp.match(path)
if not _match:
self.path = self.params = None
return None
self.path = _match.string
self.params = _match.groupdict()
return self |
def main(self, environ: dict, start_response: dict): # []
"""Router main for handle all routes
:param environ: Request is used to describe an request to a server.
:param start_response: Represents a response from a web request."""
try:
_request = Request(environ)._config()
_response = Response()._config(environ, start_response, self._set_response)
self._endpoint(_request, _response)
_result = self._response_request(_response, self.result)
self.result = None
return _result
except Exception as e:
return Response(str(e))(environ, start_response) |
def use(self, fn):
"""Use to add a middleware that is execute for all routes
:param fn: Route dispatch function for the request
"""
"""Set middle to routes"""
self.middlewares.append(
self._set_handler_to_layer(fn)
)
) |
def route(self, path: str, method: str, handlers: list): # Route
"""Create a Route instance and adds this route to specific controller
of a HTTP method in a Router.
:param path: Route to searching in the layer
:param method: HTTP Request Method from the client request
:param handlers: List of controllers functions from a routes files
"""
_route = Route(path)
for _handler in handlers:
"""Set handler to route"""
_route.stack.append(
self._set_handler_to_layer(_handler)
)
_layer = Layer(
path,
{
u"strict": self.rules.get('strict_slashes', False),
u"end": True
},
_route.dispatch
)
_layer.route = _route
self.methods[method].append(_layer)
return _route |
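A sketch of registering a controller through route(); the controller signature and names are illustrative, and they assume a Router whose methods dict already contains a "GET" list (the real handler contract depends on _set_handler_to_layer, which is not shown here):

def ping(req, res):
    # hypothetical controller returning a JSON body
    return res.ok({u'message': u'pong'})

router = Router()
router.route("/ping", "GET", [ping])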
def _response_request(self, res, result):
"""Response to a client request. If response was not specific, return
status 200 and message 200 for default
:param res: Represents a response from a web request.
:param result: Instance of the object with the werkzeug response
"""
if result:
return result
return res.ok() |
def _endpoint(self, req: Request, res: Response): # []
"""This function handle any request from a client, search in the Route List
:param req: Request is used to describe an request to a server.
:param res: Represents a response from a web request."""
try:
# validate if path contains slash and it isn't a "/" path
if self.rules.get('strict_slashes', False) \
and len(req.path) > 1 \
and "/" in req.path[-1]:
raise ValueError(req.path[:-1])
# search the specific method
_method = self.methods[req.method]
if not _method:
raise KeyError(
"error: The HTTP method {0} doesn't exist".format(
req.method)
)
# search the layer for this method
_layer: Layer = self._search_layer(req.path, _method)
if not _layer:
            raise KeyError(
                "error: The route {0} doesn't exist".format(
                    req.path)
)
# search the first handle for this one
_has_method = self._handles_method(_layer)
        assert _has_method, "error: The route must have the following format: METHOD(path, [...handler functions])"
# set the params to request
req.params = _layer.params
# return the handle logic
return _layer.handle_request(req, res, Next(req, res, _layer, self.middlewares))
except ValueError as e:
return res.redirect(str(e))
except KeyError as e:
return res.not_found(str(e))
except Exception as e:
traceback.print_exc(file=sys.stdout)
return res.bad_request(str(e)) |
def _search_layer(self, path: str, method: str): # Layer
"""Search a specific Layer using a path and method.
:param path: Route to searching in the layer
:param method: HTTP Request Method from the client request
    :return: None if the layer doesn't exist, or the matching Layer object
if the layer is found."""
_match = None
for _layer in method:
_match = _layer.match(path)
if _match:
break
return _match |
def validate_obligate_fields(fields: any = None):
"""Validate if a list of obligate params are valid
:param fields: object that contains all params that are obligate,
these values can be arrays or simple values."""
'''You can use the following example:
_validate = validate_obligate_fields({
u'files': req.files.get("files", None)
})
if _validate["valid"] is False:
return res.bad_request(
error_response(
"The param {} is necesary.".format(_validate["error"])
)
)
'''
if not fields:
raise ValueError("error: A value for fields is necessary.")
for field in fields:
_item = fields.get(field, None)
        if _item is None \
or (isinstance(_item, str) and _item == "") \
or (isinstance(_item, list) and not _item):
return {
u'valid': False,
u'error': field
}
return {
u'valid': True,
u'error': None
} |
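A small usage sketch; the field values are illustrative:

check = validate_obligate_fields({
    u'name': u'retic',
    u'tags': [],   # an empty list is reported as missing
})
print(check)   # {'valid': False, 'error': 'tags'}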
def application(self, environ, start_response):
"""Application for send the response returned by the application to the client
:param environ: Request is used to describe an request to a server.
:param start_response: Represents a response from a web request."""
if not self.router:
if environ.get('PATH_INFO') == '/':
_status = '200 OK'
_message = "Welcome to Retic!"
else:
_status = '404 Not found'
_message = "error: The HTTP method {0} doesn't exist".format(
environ.get('REQUEST_METHOD')
)
start_response(_status, [('Content-Type', 'text/html')])
return [_message.encode("utf8")]
else:
return self.router.main(environ, start_response) |
def use(self, item: any, name: str = ""):
"""method of configuring the middleware.
:param item: Item of specific type for specific settings in a app
:param name: Name of the item to save
"""
"""TODO: implement another types of item"""
if isinstance(item, Router):
self.router = item
elif name:
self.apps.setdefault(name, item)
else:
        raise KeyError("error: A name for the item is necessary")
def listen(
self,
hostname: str = APP_HOSTNAME,
port: int = APP_PORT,
application: any = None,
use_reloader: bool = False,
use_debugger: bool = False,
use_evalex: bool = True,
extra_files: any = None,
reloader_interval: int = 1,
reloader_type: str = 'auto',
threaded: bool = False,
processes: int = 1,
request_handler: any = None,
static_files: any = None,
passthrough_errors=False,
ssl_context: any = None
):
"""Create a server based in settings parameters.
:param hostname: The host to bind to, for example ``'localhost'``.
If the value is a path that starts with ``unix://`` it will bind
to a Unix socket instead of a TCP socket..
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param reloader_type: the type of reloader to use. The default is
auto detection. Valid values are ``'stat'`` and
``'watchdog'``. See :ref:`reloader` for more
information.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a list or dict of paths for static files. This works
exactly like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an
:class:`ssl.SSLContext`, a tuple in the form
``(cert_file, pkey_file)``, the string ``'adhoc'`` if
the server should automatically create one, or ``None``
to disable SSL (which is the default).
source by: werkzeug.serving
"""
"""TODO: Welcome message after the server is created"""
run_simple(
hostname=hostname,
port=int(port),
application=application or self.application,
use_reloader=use_reloader,
use_debugger=use_debugger,
use_evalex=use_evalex,
extra_files=extra_files,
reloader_interval=reloader_interval,
reloader_type=reloader_type,
threaded=threaded,
processes=processes,
request_handler=request_handler,
static_files=static_files,
passthrough_errors=passthrough_errors,
ssl_context=ssl_context
) |
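A minimal bootstrap sketch; the App constructor name is an assumption (only use(), application() and listen() appear above), and router is the Router instance built earlier:

app = App()   # hypothetical application class exposing use()/listen()
app.use(router)
app.listen(hostname="localhost", port=8080, use_reloader=True)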
def config(self, value):
"""If the type of the value is not dict, it is not allowed"""
if not isinstance(value, dict):
raise TypeError(
"error: A settings dictionary of type dict is necesary"
)
self.__config = value |
def from_object(self, settings: dict):
"""Set settings in based a dictionary, for example, if you want to
set a additional configuration you nedd pass:
``app.config.from_object( { u'port': 8080 } )``
:param settings: An object of type dictionary that contains the configurations
"""
if not isinstance(settings, dict):
raise TypeError(
"error: A settings dictionary of type dict is necesary"
)
self.__config = {**self.__config, **settings} |
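Continuing the docstring's example, a sketch of loading extra settings and reading one back; the get() accessor is the one referenced in the clear() docstring below:

app.config.from_object({u'port': 8080, u'debug': True})
print(app.config.get('port'))   # 8080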
def clear(self):
"""Clear the actual settings, however, the settings from the environment
variables isn't clear. You can search variables in the environment with the function
``app.config.get("environment_name")``
"""
self.__config.clear() |
def urlencode(url: str):
"""This function is convenient when encoding a string to
use in a query part of a URL, as a convenient way to pass
variables to the next page.
:param url: URL to encode.
"""
key = urllib.parse.quote_plus(url)
return key |
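For example:

print(urlencode("name=retic framework&lang=py"))
# name%3Dretic+framework%26lang%3Dpy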
def slugify(text: str = ""):
"""Simplifies ugly strings into something URL-friendly.
:param text: Text to simplifies in ``str`` type
"""
# Reference: https://github.com/mikaelho/docgen/blob/master/docgen.py
# "[Some] _ Article's Title--"
# "[some] _ article's title--"
text = str(text).lower()
# "[some] _ article's_title--"
# "[some]___article's_title__"
for c in [' ', '-', '.', '/']:
text = text.replace(c, '_')
# "[some]___article's_title__"
# "some___articles_title__"
    text = re.sub(r'\W', '', text)
# "some___articles_title__"
# "some articles title "
text = text.replace('_', ' ')
# "some articles title "
# "some articles title "
    text = re.sub(r'\s+', ' ', text)
# "some articles title "
# "some articles title"
text = text.strip()
# "some articles title"
# "some-articles-title"
text = text.replace(' ', '-')
    # strip accents
text = unicodedata.normalize("NFD", text)
text = text.encode("utf8").decode("ascii", "ignore")
text = urlencode(text)
return text |
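A worked example matching the step-by-step comments above:

print(slugify("[Some] _ Article's Title--"))   # some-articles-title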
def _config(
self,
environ,
start_response,
set_response
):
"""Config instance of the Responses Class
    :param environ: Request is used to describe a request to a server.
:param start_response: Represents a response from a web request.
:param set_response: Response function for the request.
"""
self._environ = environ
self._start_response = start_response
self._set_response = set_response
# set default headers
self.set_headers('content-type', "application/json")
return self |
def bad_request(self, content: any = ""): # Response
"""This method response a *400 Bad Request* response to the client,
this indicate that the request is invalid.
This generally means that the request contained invalid parameters
or headers, or that you tried to do something that your application
logic does not support.
:param content: Information to send the client, a message, a dict, etc.
If it doesn't exist, it sends a status message from the status code.
"""
return self._send_by_status(400, content) |
def forbidden(self, content: any = ""): # Response
"""This method is used to send a *403 Forbidden* response to the client,
indicating that a request is not authorized.
This generally means that the user agent tried to do something that they
were not authorized to do, such as changing another user's password.
:param content: Information to send the client, a message, a dict, etc.
If it doesn't exist, it sends a status message from the status code.
"""
return self._send_by_status(403, content) |
def not_found(self, content: any = ""): # Response
"""This method is used to send a *404 Not found* response.
When called manually from your application code, this method is normally used to
indicate that the user agent tried to find, update, or delete something that doesn't exist.
:param content: Information to send the client, a message, a dict, etc.
If it doesn't exist, it sends a status message from the status code.
"""
return self._send_by_status(404, content) |
def ok(self, content: dict = None): # Response
"""This method is used to send a *200 OK* response to the client.
:param content: Information to send the client, a message, a dict, etc.
If it doesn't exist, it sends a status message from the status code.
"""
return self._send_by_status(200, content) |
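A hypothetical controller showing how these response helpers are meant to be called from a route handler; find_user is a placeholder, not part of the source:

def get_user(req, res):
    user = find_user(req.params.get('id'))   # placeholder lookup
    if user is None:
        return res.not_found({u'message': u'user not found'})
    return res.ok({u'user': user})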
def server_error(self, content: any = ""):
"""This method is used to send a *500 Server Error* response to the client,
indicating that some kind of server error occurred.
:param content: Information to send the client, a message, a dict, etc.
If it doesn't exist, it sends a status message from the status code.
"""
return self._send_by_status(500, content) |
def send(self, content: any = None): # Response
"""Send a string response in a non-JSON format (XML, CSV, plain text, etc.).
This method is used in the underlying implementation of most other terminal response methods.
:param content: Information to send the client, a message, a dict, etc.
If it doesn't exist, it sends a status message from the status code.
"""
return self._send_string(content if content else {}) |
def redirect(self, new_url: str): # Response
"""Redirect to another url with the actual request
:param new_url: URL to redirect.
"""
_result = RequestRedirect(new_url=new_url).get_response(self._environ)
return self._set_response(_result(self._environ, self._start_response)) |
def _send_string(self, content: str = ""): # Response
"""Send a response to http requests.
:param content: Information to send the client, a message, a dict, etc.
If it doesn't exist, it sends a status message from the status code.
"""
self.set_data(jsonify(content))
return self._set_response(self(self._environ, self._start_response)) |
def path_regexp(path, keys, options):
""" Return a regular expression function for a specific route
:param path: Route to searching in the layer
:param keys: List of keys from the route
:param options: Options for the regexp expression
:return: A regular expression pattern string
"""
_reg_exp = re.compile(pattern(path, **options))
for _key in _reg_exp.groupindex:
keys.append({
u"name": _key,
u"optional": False
})
return _reg_exp |