Python
def scatter_nd(indices, updates, shape, target="cce"):
    """
    Scatters input tensor updates to a new tensor according to indices.

    Args:
        indices (akg.tvm.Tensor): Tensor of type int32.
        updates (akg.tvm.Tensor): Tensor of type float16, float32, int32.
        shape (list, tuple): Specifies the shape of the output tensor.

    Returns:
        Scattered tensor with the same type as updates and the shape specified by parameter shape.
    """
    # check shapes and dtypes
    indices_shape = [x.value for x in indices.shape]
    data_shape = [x.value for x in updates.shape]
    utils.check_shape(indices_shape)
    utils.check_shape(data_shape)
    indices_dtype = indices.dtype
    if indices_dtype != "int32":
        raise TypeError("indices only supports int32 while dtype is %s" % indices_dtype)
    dtype = updates.dtype
    support_list = {"float16", "float32", "int32"}
    if dtype not in support_list:
        raise TypeError("scatter_nd only supports %s while dtype is %s" % (",".join(support_list), dtype))

    n = indices.shape[0].value

    def pick(i, j, *indexes):
        return akg.tvm.expr.Select(j == indices[i][0],
                                   akg.tvm.const(1, updates.dtype),
                                   akg.tvm.const(0, updates.dtype)) * updates[(i,) + indexes]

    reducible = akg.tvm.compute([n] + list(shape), lambda *i: pick(i[0], i[1], *i[2:]), name="reduc")
    k = akg.tvm.reduce_axis((0, n))
    res = akg.tvm.compute(shape, lambda *i: akg.tvm.sum(reducible[(k,) + i], axis=k))
    return res
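The compute above sums a one-hot selection over the first output axis, so duplicate indices accumulate. As a sanity check, here is a minimal NumPy reference for the same semantics; this is an illustration added here, not part of the AKG source, and scatter_nd_np is a hypothetical helper name.

import numpy as np

def scatter_nd_np(indices, updates, shape):
    # indices has shape (n, 1); row i sends updates[i] to out[indices[i, 0]],
    # and duplicate indices accumulate, matching the reduction over k above.
    out = np.zeros(shape, dtype=updates.dtype)
    np.add.at(out, indices[:, 0], updates)
    return out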
Python
def need_broadcast(main_data_shape, main_logical_shape, with_shape):
    """Return False if main_data does not need broadcast."""
    if not with_shape:
        return False
    if not main_logical_shape:
        return False
    with_data_num = reduce(lambda x, y: x * y, with_shape)
    if with_data_num == 1:
        return False
    if main_logical_shape == with_shape:
        return False
    main_logical_shape_new = main_logical_shape if main_logical_shape else (1,)
    # No special broadcast is needed if there is no pad in data
    if main_logical_shape_new == main_data_shape:
        return False
    if len(main_logical_shape) >= len(with_shape):
        for i in range(0 - len(with_shape), 0):
            if main_logical_shape[i] < with_shape[i]:
                return True
        return False
    return True
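A quick illustration of the decision logic, assuming functools.reduce is available as reduce in this module; the shapes are hypothetical and mirror the NC1HWC0 example discussed in the next function.

# Logical (4, 1, 3, 3) padded to data shape (4, 1, 3, 3, 16), combined with (4, 4, 3, 3):
# the padded C axis must be broadcast, so the check returns True.
print(need_broadcast((4, 1, 3, 3, 16), (4, 1, 3, 3), (4, 4, 3, 3)))   # True
# Identical logical and companion shapes need no special handling.
print(need_broadcast((4, 4, 3, 3, 16), (4, 4, 3, 3), (4, 4, 3, 3)))   # False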
Python
def broadcast_by_format(ori_data, logical_shape, format_in, with_shape):
    """
    Do special broadcast for special formats when a padding axis needs to broadcast, such as C in NCHW (NC1HWC0).

    Rewrite padding values to broadcast values in special cases, for example:
    op1 * op2, where op1 and op2 are both NC1HWC0, and their logical 4D shapes are (4, 1, 3, 3)
    and (4, 4, 3, 3). op1's shape becomes (4, 1, 3, 3, 16) after transformation from 4D to NC1HWC0.
    We need to fill the data of axis C0 with broadcast values rather than padding values.

    Note:
        There is no need to do broadcast for scalars and DefaultFormat (or NHWC) here.
    """
    ori_data_shape = tuple(get_shape(ori_data))
    if not need_broadcast(ori_data_shape, tuple(logical_shape), tuple(with_shape)):
        return ori_data

    nchw_shape_len = fracz_shape_len = 4
    nc1hwc0_shape_len = 5

    logical_shape_new = tuple(logical_shape) if logical_shape else (1,)
    data_num = reduce(lambda x, y: x * y, logical_shape_new)
    if data_num == 1:
        # this is a scalar
        if len(ori_data_shape) == fracz_shape_len:
            new_data = akg.tvm.compute((1,), lambda i: ori_data[0, 0, 0, i])
        elif len(ori_data_shape) == nc1hwc0_shape_len:
            new_data = akg.tvm.compute((1,), lambda i: ori_data[0, 0, 0, 0, i])
        else:
            raise RuntimeError("Unsupported shape {}".format(ori_data_shape))
        return new_data

    # NC1HWC0
    if format_in == NC1HWC0:
        if len(with_shape) != nchw_shape_len:
            raise ValueError("with_shape must be 4D, while it is {}".format(with_shape))
        # rewrite padding value to broadcast value only if C (NCHW; NHWC is not considered) is the broadcast axis
        if logical_shape[1] == 1:
            new_data = akg.tvm.compute(ori_data_shape, lambda n, c1, h, w, c0: ori_data[n, c1, h, w, 0])
            return new_data
        return ori_data

    raise RuntimeError("Broadcast is unsupported when logical_shape is {}, and format is {}".
                       format(logical_shape, format_in))
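A plain NumPy sketch (not part of the source) of what the NC1HWC0 rewrite achieves when the logical C axis is 1: every slot along C0 takes the value stored at c0 == 0, so a later element-wise op sees broadcast values instead of pad values.

import numpy as np

packed = np.zeros((4, 1, 3, 3, 16), dtype=np.float32)
packed[..., 0] = np.random.rand(4, 1, 3, 3)             # real data lives at c0 == 0, the rest is pad
rewritten = np.broadcast_to(packed[..., :1], packed.shape)
print(np.allclose(rewritten[..., 5], packed[..., 0]))   # True: pad slots now carry broadcast values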
Python
def traceback(self):
    """Use the traceback module to print the details of the case execution failure."""
    self.log.error("An error occurred during case execution.")
    traceback.print_exc()
Python
def compare_tensor(acu_output, exp_output, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Compare the actual output with the expected result.

    :param acu_output: array_like, actual output to compare.
    :param exp_output: array_like, expected output to compare.
    :param rtol: float, the relative tolerance parameter (see Notes).
    :param atol: float, the absolute tolerance parameter (see Notes).
    :param equal_nan: bool, whether to compare NaNs as equal. If True, NaNs in `a` will be
        considered equal to NaNs in `b` in the output array.
    :return: True / False
    """
    res = np.allclose(acu_output, exp_output, rtol, atol, equal_nan)
    if not res:
        pandora_logger_ = Log(case_name=os.path.dirname(__file__), case_path=os.getcwd())
        pandora_logger_.log.error("This shape precision is not up to standard, compare failed.")
    return res
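A small usage sketch; the arrays and tolerances below are hypothetical. The failure branch relies on the module's Log class, which is only touched when the comparison fails.

import numpy as np

actual = np.array([1.0, 2.0, 3.0], dtype=np.float16)
expected = np.array([1.0, 2.0, 3.0], dtype=np.float32)
assert compare_tensor(actual, expected, rtol=1e-3, atol=1e-3)   # passes, nothing is logged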
Python
def precheck(desc):
    """
    This util is used to:
        1. Run a precheck for testing cases that have only element-wise computations, and then
        2. Return a reasonable mean value for generating random Gaussian input data,
    to avoid the precision error caused by computing division by zero, the reciprocal of zero,
    or the square root of zero.
    """
    elemwise_op_func_map = {
        "Neg": lambda a: -a, "Abs": lambda a: abs(a), "Cast": lambda a: a, "Log": lambda a: math.log(a),
        "Exp": lambda a: math.exp(a), "Sqrt": lambda a: math.sqrt(a), "Rsqrt": lambda a: 1 / math.sqrt(a),
        "Reciprocal": lambda a: 1 / a, "Square": lambda a: a ** 2,
        "Add": lambda a, b: a + b, "Sub": lambda a, b: a - b, "Mul": lambda a, b: a * b,
        "RealDiv": lambda a, b: a / b, "Minimum": lambda a, b: min(a, b), "Maximum": lambda a, b: max(a, b),
        "Pow": lambda a, b: pow(a, b)
    }
    stop_forward = set()
    variable = dict()

    def update_stop_forward(out_desc):
        for out_tensor in out_desc:
            stop_forward.add(out_tensor['tensor_name'])

    def need_jump(op_desc):
        for in_desc in op_desc['input_desc']:
            for in_tensor in in_desc:
                if in_tensor['tensor_name'] in stop_forward:
                    update_stop_forward(op_desc['output_desc'])
                    return True
        return False

    def fill_input_value(input_desc, input_value):
        inputs = []
        for in_desc in input_desc:
            for in_tensor in in_desc:
                if "value" in in_tensor:
                    val = in_tensor["value"]
                elif in_tensor['tensor_name'] in variable:
                    val = variable[in_tensor['tensor_name']]
                else:
                    val = input_value
                inputs.append(val)
        return inputs

    def compute_math(op_name, inputs, input_value):
        if op_name == "Rsqrt" and abs(inputs[0]) <= 0.01:
            logging.info(
                "The input with mean value {} fails the precheck because zero has no square root".format(input_value))
            return None
        elif op_name == "Reciprocal" and abs(inputs[0]) <= 0.01:
            logging.info(
                "The input with mean value {} fails the precheck because zero has no reciprocal".format(input_value))
            return None
        elif op_name == "RealDiv" and abs(inputs[1]) <= 0.01:
            logging.info(
                "The input with mean value {} fails the precheck because zero cannot be a divisor".format(input_value))
            return None
        else:
            return elemwise_op_func_map[op_name](*inputs)

    def check_pass(input_value):
        for op_desc in desc['op_desc']:
            if op_desc['name'] not in elemwise_op_func_map:
                update_stop_forward(op_desc['output_desc'])
            elif not need_jump(op_desc):
                inputs = fill_input_value(op_desc['input_desc'], input_value)
                output = op_desc['output_desc'][0]['tensor_name']
                if compute_math(op_desc['name'], inputs, input_value) is None:
                    return False
                variable[output] = compute_math(op_desc['name'], inputs, input_value)
        return True

    initial_input = 1
    while not check_pass(initial_input):
        initial_input += 1
        if initial_input > 20:
            logging.info("Input mean value check failed! Just use mean value 1. Precision error may occur!")
            return 1
    logging.info("Input data with mean value {} is generated".format(initial_input))
    return initial_input
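To make the traversal concrete, here is a hypothetical kernel description for Reciprocal(x - 1): a mean value of 1 drives the divisor to zero, so the loop should settle on 2. The dict layout is inferred from how precheck reads op_desc, input_desc and output_desc, not taken from a real kernel description.

desc = {
    "op_desc": [
        {"name": "Sub",
         "input_desc": [[{"tensor_name": "x"}], [{"tensor_name": "one", "value": 1}]],
         "output_desc": [{"tensor_name": "t0"}]},
        {"name": "Reciprocal",
         "input_desc": [[{"tensor_name": "t0"}]],
         "output_desc": [{"tensor_name": "t1"}]},
    ]
}
print(precheck(desc))   # 2: mean value 1 makes t0 == 0, which Reciprocal rejects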
Python
def func(size_, miu_=0, sigma_=8, seed_=None):
    """
    Select a random func according to RANDOM_FUNC_MODE and a randint calculated
    from the length of random_func_list.

    Args:
        size_ (int): Size of data.
        miu_ (int): Mean value. Default: 0.
        sigma_ (int): Standard deviation. Default: 8.
        seed_ (int): Seed for random.

    Returns:
        Random data, generated by one entry of random_func_list.
    """
    size_ = (size_ + RANDOM_SEED_NUM - 1) // RANDOM_SEED_NUM
    random_func_list = [
        np.random.RandomState(seed_).normal(miu_, sigma_, size_),
        np.random.RandomState(seed_).logistic(miu_, sigma_, size_),
        np.random.RandomState(seed_).laplace(miu_, sigma_, size_),
        np.random.RandomState(seed_).uniform(miu_, sigma_, size_),
        np.random.RandomState(seed_).tomaxint(size_),
    ]
    env_dic = os.environ
    if not env_dic.get('RANDOM_FUNC_MODE'):
        func_idx = 0
    else:
        func_idx = np.random.RandomState(None).randint(len(random_func_list))
    res = random_func_list[func_idx]
    return res
Python
def random_gaussian(size, miu=0, sigma=8, epsilon=0, seed=None):
    """Generate a random array whose values obey a Gaussian distribution."""
    random_data_disk_path = None
    if os.environ.get("RANDOM_DATA_DISK_PATH") is not None:
        random_data_disk_path = os.environ.get("RANDOM_DATA_DISK_PATH") + \
            "/random_data_%s_%s.bin" % (str(miu), str(sigma))

    if random_data_disk_path is None or (not os.path.exists(random_data_disk_path)):
        if sigma <= 0:
            sys.stderr.write("Error: Expect positive sigma for gaussian distribution, but get %f\n" % sigma)
            sys.exit(1)
        size_c = 1
        for x in size:
            size_c = size_c * x

        if seed is None:
            seed_ = []
            for i in range(RANDOM_SEED_NUM):
                now = int(time.time() % 10000 * 10000) + random.randint(i, 100)
                seed_.append(now)
        else:
            seed_ = [seed] * RANDOM_SEED_NUM
        logging.debug("random_gaussian seeds: {}".format(seed_))
        # In the profiling scenario, when a new process is used to run test cases, data generated by
        # multiple processes stops responding. To locate the fault, set gen_data_multi_process to False.
        gen_data_multi_process = not bool(get_profiling_mode())
        if gen_data_multi_process:
            with Pool(processes=8) as pool:
                ret = np.array(pool.starmap(func, zip(repeat(size_c), repeat(miu), repeat(sigma), seed_)))
        else:
            numbers = list()
            for s in seed_:
                numbers.extend(func(size_c, miu, sigma, s))
            ret = np.array(numbers)
        ret = ret.flatten()
        return ret[:size_c].reshape(size) + epsilon

    data_len = functools.reduce(lambda x, y: x * y, size)
    data_pool = np.fromfile(random_data_disk_path)
    if data_len % len(data_pool) != 0:
        copy_num = (data_len // len(data_pool)) + 1
    else:
        copy_num = data_len // len(data_pool)
    data_copy = np.copy(data_pool)
    data_copy_list = []
    for _ in range(copy_num):
        np.random.shuffle(data_copy)
        # append a copy so that each chunk keeps its own shuffle order
        data_copy_list.append(np.copy(data_copy))
    data_pool = np.concatenate(tuple(data_copy_list), axis=0)
    return data_pool[0:data_len].reshape(size) + epsilon
Python
def strided_slice_grad_tiling_strategy(tensor, begin, end, strides):
    """Custom tiling strategy for strided slice grad op."""
    strategy = list()
    for i, shp in enumerate(tensor.shape):
        length = end[i] - begin[i]
        if length <= strides[i] or int(shp) % strides[i] != 0:
            strategy += ct_util.create_constraint_on_tensor(tensor=tensor,
                                                            values="FULL",
                                                            tensor_pos=i,
                                                            constraints=ct_util.TileConstraint.MAX)
    return strategy
Python
def Pow(data1, data2, target=utils.CCE):
    """
    Computes power(data1, data2) elementwise, broadcast is supported.

    Args:
        data1 (tvm.tensor.Tensor): Tensor.
        data2 (tvm.tensor.Tensor): Tensor of same type as data1, if shape(data2) != shape(data1),
            broadcast will happen.

    Returns:
        tvm.tensor.Tensor, powered result, with same type as input tensors and broadcasted shape of
        data1 and data2.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    if target == utils.CCE:
        return _pow_ascend(data1, data2, target)
    else:
        return _pow(data1, data2)
Python
def minimum_ad(head, data_x, data_y, grad_x=True, grad_y=True):
    """
    Calculate the reversed outputs of the operator minimum by using automatic differentiation.

    Args:
        head (tvm.tensor.Tensor): Input tensor of float32, float16 and int32.
        data_x (tvm.tensor.Tensor): Input tensor of float32, float16 and int32.
        data_y (tvm.tensor.Tensor): Input tensor of float32, float16 and int32.
        grad_x (bool): Default is True, whether to differentiate x.
        grad_y (bool): Default is True, whether to differentiate y.

    Returns:
        tvm.tensor.Tensor, has the same type and shape as grads. If grad_x and grad_y are both True,
        returns a list like: [jacs[0], jacs[1]].
    """
    utils.elemwise_shape_check(data_x.shape, data_y.shape)
    utils.elemwise_shape_check(head.shape, data_x.shape)
    utils.elemwise_dtype_check(data_x.dtype, head.dtype,
                               [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])
    utils.elemwise_dtype_check(data_x.dtype, data_y.dtype,
                               [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])
    if not grad_x and not grad_y:
        raise ValueError("At least one of grad_x and grad_y must be True.")
    op = Minimum(data_x, data_y)
    jacs = list(akg.differentiate(op, [data_x, data_y], head))
    if grad_x and grad_y:
        return jacs[0], jacs[1]
    if grad_x:
        return jacs[0]
    return jacs[1]
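For reference, the gradients that automatic differentiation of an element-wise minimum produces can be written directly in NumPy; the tie-breaking convention at x == y (credit x) is an assumption here, not taken from the source.

import numpy as np

def minimum_grads_np(head, x, y):
    # d(min(x, y))/dx is 1 where x <= y and 0 elsewhere; dy is the complement.
    dx = np.where(x <= y, head, 0.0)
    dy = np.where(x <= y, 0.0, head)
    return dx, dy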
Python
def ones_like(input):
    """
    Generate an array of ones.

    Args:
        input (tvm.tensor.Tensor): Tensor. Should be of type float16, float32, int32, uint8, int8.

    Returns:
        tvm.tensor.Tensor with the same type and shape as input.
    """
    dtype = input.dtype
    shape = get_shape(input)
    utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.ALL_TYPES])
    utils.check_shape(shape)
    res = akg.tvm.compute(shape, lambda *i: akg.tvm.const(1, "float16"), name="res", attrs={'no_inline': 1})
    res = Cast(res, dtype, target=utils.CCE)
    return res
Python
def crossentropyloss_ad(head, labels, logits, target="cce"):
    """Compute gradient of crossentropy_loss operator using automatic differentiate."""
    loss = crossentropyloss.crossentropyloss(labels, logits, axis=-1)
    jacs = list(akg.differentiate(loss, [logits], head))
    return jacs[0]
Python
def _sqrt_mini_vsqrt_newton_iter(x):
    """sqrt compute on mini with the Newton's Iteration of vrsqrt"""

    def vsqrt_newton_iter(data):
        """compute vsqrt with newton_iter"""
        data_rsqrt = akg.topi.rsqrt(data)
        # vrsqrt newton_iter: x(n+1) = x(n)*(3-a*x(n)^2)/2
        steps = 3
        half = akg.tvm.const(0.5, x.dtype)
        shape = data.shape
        for i in range(steps):
            data_rsqrt = akg.tvm.compute(shape,
                                         lambda *indice: half * data_rsqrt(*indice) *
                                         (3 - data(*indice) * data_rsqrt(*indice) * data_rsqrt(*indice)),
                                         name="data_rsqrt_%s" % i)
        return data_rsqrt

    x_rsqrt = vsqrt_newton_iter(x)
    x_sqrt = akg.topi.multiply(x, x_rsqrt)
    return x_sqrt
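The recurrence in the comment converges quadratically to 1/sqrt(a). A quick NumPy check (illustrative only, not part of the operator) shows three steps recovering sqrt(a) from a perturbed start, mirroring steps = 3 above.

import numpy as np

a = np.array([0.25, 1.0, 4.0, 100.0], dtype=np.float32)
x = (1.0 / np.sqrt(a)) * 1.05          # deliberately off by 5% to mimic a coarse vrsqrt estimate
for _ in range(3):
    x = 0.5 * x * (3.0 - a * x * x)    # Newton step for f(x) = 1/x**2 - a
print(a * x)                           # ~[0.5, 1.0, 2.0, 10.0], i.e. sqrt(a)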
Python
def Maximum(data1, data2, target=utils.CCE):
    """
    Take element-wise maximum of two tensors with auto-broadcasting.

    Args:
        data1: tvm.tensor.Tensor
        data2: tvm.tensor.Tensor

    Returns:
        tvm.tensor.Tensor of maximum of two tensors.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    shape1 = [x.value for x in data1.shape]
    shape2 = [x.value for x in data2.shape]
    utils.check_shape(shape1)
    utils.check_shape(shape2)
    utils.auto_broadcast_check(shape1, shape2)
    utils.elemwise_dtype_check(data1.dtype, data2.dtype)
    dtype = data1.dtype
    need_cast = True if target == utils.CCE and dtype in ["int8", "uint8"] else False
    if need_cast:
        data1 = topi.cast(data1, "float16")
        data2 = topi.cast(data2, "float16")
    res = topi.maximum(data1, data2)
    if need_cast:
        res = topi.cast(res, dtype)
    return res
Python
def Sum(inputs, axis=None, keepdims=False, target=utils.CCE):
    """
    Compute the sum of elements across dimensions of a tensor.

    Args:
        inputs (tvm.tensor.Tensor): Tensor.
        axis (Union[list, tuple, int, None]): If the list or tuple is empty, the axis equals None.
        keepdims (bool): If keepdims equals True, the result shape length is the same as the input shape length.

    Returns:
        tvm.tensor.Tensor, has the same type as input. If keepdims is True, all reduced dimensions are
        retained with length 1, else these reduced axes will be eliminated.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    # Check types
    if target == utils.CCE:
        dtype = inputs.dtype
        utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)

    axis = ft_util.refine_reduce_axis(inputs, axis)
    utils.check_shape(inputs.shape)

    if not axis:
        output = akg.topi.identity(inputs)
    else:
        output = akg.topi.sum(inputs, axis=axis, keepdims=keepdims)
    return output
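A NumPy analogue (illustration only) of the axis and keepdims behaviour described in the docstring:

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
print(np.sum(x, axis=(0, 2), keepdims=True).shape)   # (1, 3, 1): reduced axes kept with length 1
print(np.sum(x, axis=(0, 2)).shape)                  # (3,): reduced axes eliminated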
Python
def sgd(parameters, gradient, accum, stat, learning_rate, momentum, dampening=0.0,
        weight_decay=0.0, nesterov=False):
    """
    Update parameters, accum and stat according to the SGD algorithm.

    accum = accum * momentum + grad
    if use_nesterov is True:
        parameters -= grad * lr + accum * momentum * lr
    else:
        parameters -= accum * lr

    Args:
        parameters (tvm.tensor.Tensor): parameters tensor of float32, float16, to be updated.
        gradient (tvm.tensor.Tensor): gradient tensor of float32, float16.
        accum (tvm.tensor.Tensor): accum tensor of float32, float16, to be updated.
        stat (tvm.tensor.Tensor): stat tensor of float32, float16, to be updated.
        momentum (tvm.tensor.Tensor): momentum tensor of float32, float16, shape must be equal to (1,).
        learning_rate (tvm.tensor.Tensor): learning_rate tensor of float32, float16, shape must be equal to (1,).
        dampening (float): Default value is 0.0.
        weight_decay (float): Default value is 0.0.
        nesterov (bool): Default is False.

    Return:
        accum_t (tvm.tensor.Tensor): updated accum with same type and shape as accum.
        stat_t (tvm.tensor.Tensor): updated stat with same type and shape as stat.
        parameters_t (tvm.tensor.Tensor): updated parameters with same type and shape as parameters.
    """
    if nesterov and dampening != 0:
        raise ValueError("Nesterov requires zero dampening!")
    if weight_decay < 0:
        raise ValueError("weight_decay must be >= 0.")

    # shape check
    utils.elemwise_shape_check(parameters.shape, gradient.shape)
    utils.elemwise_shape_check(parameters.shape, accum.shape)
    utils.elemwise_shape_check(parameters.shape, stat.shape)

    # dtype check
    utils.ops_dtype_check([parameters.dtype, gradient.dtype, accum.dtype, stat.dtype],
                          utils.DtypeForDavinci.ALL_FLOAT)

    parameters_t, accum_t, stat_t = sgd_compute(parameters, gradient, learning_rate, accum, momentum,
                                                stat, dampening, weight_decay, nesterov)
    parameters_t, binds_info = TensorUtils.inplace_set(parameters, parameters_t, "parameters_buf")
    accum_t, binds_info2 = TensorUtils.inplace_set(accum, accum_t, "accum_buf")
    stat_t, binds_info3 = TensorUtils.inplace_set(stat, stat_t, "stat_buf")
    binds_info.update(binds_info2)
    binds_info.update(binds_info3)
    attrs = {utils.BINDS: binds_info}
    return parameters_t, accum_t, stat_t, attrs
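The update rule quoted in the docstring, written out as a NumPy sketch under the default dampening = 0 and weight_decay = 0; sgd_np is a hypothetical helper, not the sgd_compute used above.

import numpy as np

def sgd_np(param, grad, accum, lr, momentum, nesterov=False):
    accum = accum * momentum + grad
    if nesterov:
        param = param - (grad * lr + accum * momentum * lr)
    else:
        param = param - accum * lr
    return param, accum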
Python
def quantized_avgpool_tiling_strategy(data, kernel, stride, pad, quant_algo):
    """Custom tiling for quantized avgpool."""
    batch, c_1, fm_h, fm_w, c_0 = get_shape(data)
    _, [out_h, out_w] = \
        cal_pad_shapes_by_strategy(get_shape(data), kernel, stride, pad)
    strategy = list()
    if c_0 == 16:
        h_cut = out_h
        if fm_h >= 50 and fm_w >= 50:
            h_cut = 3
        dim_ind = 0
        tiling_params = list()
        if batch > 1:
            tiling_params.append([1, ct_util.TileConstraint.FACTOR, dim_ind])
            dim_ind = dim_ind + 1
        if c_1 > 1:
            tiling_params.append([1, ct_util.TileConstraint.FACTOR, dim_ind])
            dim_ind = dim_ind + 1
        tiling_params.append([h_cut, ct_util.TileConstraint.FACTOR, dim_ind])
        tiling_params.append(["H", ct_util.TileConstraint.SET_AXIS_INFO, dim_ind])
        tiling_params.append([out_w, ct_util.TileConstraint.FACTOR, dim_ind + 1])

        if quant_algo is not None:
            tiling_params.append([kernel[0], ct_util.TileConstraint.FACTOR, dim_ind + 2])
            tiling_params.append([kernel[1], ct_util.TileConstraint.FACTOR, dim_ind + 3])
            tiling_params.append([16, ct_util.TileConstraint.FACTOR, dim_ind + 4])
        else:
            tiling_params.append([kernel[0], ct_util.TileConstraint.FACTOR, dim_ind + 3])
            tiling_params.append([kernel[1], ct_util.TileConstraint.FACTOR, dim_ind + 4])
            tiling_params.append([16, ct_util.TileConstraint.FACTOR, dim_ind + 2])

        for para in tiling_params:
            strategy += ct_util.create_constraint_on_axis(
                values=para[0], constraints=para[1], axis=para[2])
    return strategy
Python
def SumV2(inputs, axis=None, keepdims=True, target=utils.CCE):
    """
    another implementation of sum with topi api.

    Supported Platforms:
        'Ascend'
    """
    if target != utils.CCE:
        raise RuntimeError('operator not supported on %s' % utils.get_backend(target))
    dtype = inputs.dtype
    utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)
    axis = ft_util.refine_reduce_axis(inputs, axis)
    utils.check_shape(inputs.shape)
    if not axis:
        output = akg.topi.identity(inputs)
    else:
        if dtype == "float16":
            step_sum = Cast(inputs, "float32", target)
        else:
            step_sum = inputs

        step_sum = akg.topi.sum(step_sum, axis=axis, keepdims=keepdims)

        if dtype == "float16":
            output = Cast(step_sum, "float16", target)
        else:
            output = step_sum
    return output
Python
def SumByShape(broadcast_data, original_shape, target=utils.CCE):
    """
    sum the broadcast_data by original shape; gradient for Broadcast.

    Supported Platforms:
        'Ascend'
    """
    if target != utils.CCE:
        raise RuntimeError('operator not supported on %s' % utils.get_backend(target))
    broadcast_shape = get_shape(broadcast_data)
    original_shape = get_shape(original_shape)
    if broadcast_shape == original_shape:
        return broadcast_data
    if original_shape == [1]:
        data = Sum(broadcast_data, target=target)
        return data

    utils.broadcast_check(original_shape, broadcast_shape)
    axis_len = len(broadcast_shape) - len(original_shape)
    if axis_len > 0:
        axis = list(range(axis_len))
        broadcast_data = Sum(broadcast_data, axis, False, target=target)
        broadcast_shape = get_shape(broadcast_data)

    axis = []
    for i, _ in enumerate(original_shape):
        if original_shape[i] != broadcast_shape[i]:
            axis.append(i)
    res = Sum(broadcast_data, axis, True, target=target)[0] if axis else broadcast_data
    return res
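What this amounts to numerically, sketched in NumPy with hypothetical shapes: the gradient of a broadcast is the sum over the broadcast axes, reduced back to the original shape.

import numpy as np

grad = np.ones((2, 3, 4), dtype=np.float32)      # gradient w.r.t. data broadcast to (2, 3, 4)
# original shape (3, 1): sum away the prepended axis, then the axis that was stretched.
reduced = grad.sum(axis=0).sum(axis=1, keepdims=True)
print(reduced.shape)                             # (3, 1)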
Python
def bn1_tiling_strategy(tensor):
    """Custom tiling strategy for first part of split fused_batch_norm op"""
    # bn1 input [N, C1, H, W, C0]
    n_pos = 0
    c0_pos = 4
    strategy = list()
    strategy += ct_util.create_constraint_on_tensor(
        tensor=tensor,
        values=1,
        constraints=ct_util.TileConstraint.FACTOR,
        tensor_pos=n_pos)
    strategy += ct_util.create_constraint_on_tensor(
        tensor=tensor,
        values="FULL",
        constraints=ct_util.TileConstraint.MAX,
        tensor_pos=c0_pos)
    return strategy
Python
def bn1_check(data):
    """check bn1 func's parameters availability for fused_bn1"""
    shape = get_shape(data)
    dtype = data.dtype
    if len(shape) != 5:
        raise RuntimeError("Only support 5D data, "
                           "but get {}!".format(shape))
    utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)
Python
def inplace_operate_bind(in_tensors, out_tensors, inplace_binds):
    """
    Some tensors need to be calculated in place.

    Args:
        in_tensors (Union[list, tuple]): Origin input tensors.
        out_tensors (Union[list, tuple]): Origin output tensors.
        inplace_binds (tuple): Should be a tuple of tuples; the first value of each element is the
            input tensor index, the second is the output tensor index. Each pair (in_id, out_id)
            means the out_id output tensor is updated in place into the in_id input tensor.

    Returns:
        Two-element tuple, one for output tensors, the other for tensor bind relations.
    """
    for in_id, out_id in inplace_binds:
        if in_id >= len(in_tensors) or out_id >= len(out_tensors):
            raise RuntimeError("Inplace binds is invalid, while there are {} "
                               "input tensors and {} output tensors, but get "
                               "bind {}.".format(len(in_tensors), len(out_tensors), inplace_binds))

    out_tensors = list(out_tensors)
    tensor_binds = {}
    inplaced_tensors = []
    for i, bind in enumerate(inplace_binds):
        in_tensor = in_tensors[bind[0]]
        out_tensor = out_tensors[bind[1]]
        out_tensor, binds_info = TensorUtils.inplace_set(in_tensor, out_tensor,
                                                         buffer_name="inp_buf_{}".format(i))
        tensor_binds.update(binds_info)
        # Calculation is updated in place in the input tensor, but MindSpore
        # needs a related fake tensor (never used) in the output list...
        out_tensor_shape = out_tensor.shape
        fake_tensor = akg.tvm.compute(
            out_tensor_shape,
            lambda *index, o_tensor=out_tensor: o_tensor(*index),
            name="fake_tensor_{}".format(i))
        out_tensors[bind[1]] = fake_tensor
        inplaced_tensors.append(out_tensor)

    return (tuple(out_tensors + inplaced_tensors), tensor_binds)
Python
def FusedBn2(mean, var_part, running_mean, running_var, momentum=0.8, target=utils.CCE):
    """
    Calculating mean, variance and update running variables.

    Read fused_bn1 docs for details.

    Note:
        Apply reduction of 'N' axis to calculating mean and variance.

    Args:
        mean (tvm.tensor.Tensor): Tensor of type float32 as mean.
        var_part (tvm.tensor.Tensor): Tensor of type float32, intermediate variables for variance.
        running_mean (tvm.tensor.Tensor): Tensor of type float32 as trained mean used in inference stage.
        running_var (tvm.tensor.Tensor): Tensor of type float32 as trained variance used in inference stage.
        momentum (float): A float number used for updating running values, must meet condition
            '0.0 < momentum < 1.0'.

    Returns:
        variance (tvm.tensor.Tensor): A float32 tensor as data's variance.
        running_mean_updated (tvm.tensor.Tensor): A float32 tensor as updated running_mean (updated inplace).
        running_var_updated (tvm.tensor.Tensor): A float32 tensor, updated running_var (updated inplace).
    """
    utils.ops_dtype_check([mean.dtype, var_part.dtype], utils.DtypeForDavinci.FLOAT32)

    dim_info, _ = bn2_set_dim_func(mean, var_part, running_mean, running_var, momentum)
    attrs = {**ATTR_MAP_BN2}

    in_tensors = (var_part, mean, running_mean, running_var)

    sub_mean_square = akg.tvm.compute(mean.shape,
                                      lambda *i: akg.tvm.const(-1.0, dtype=mean.dtype) * mean(*i) * mean(*i),
                                      name="sub_mean_square")
    variance = akg.tvm.compute(mean.shape,
                               lambda *i: var_part(*i) + sub_mean_square(*i),
                               name="variance")

    # update running mean and variance
    running_mean_updated = update_by_moving_average(running_mean, mean, momentum)
    running_var_updated = update_by_moving_average(running_var, variance, momentum)

    out_tensors = (variance, running_mean_updated, running_var_updated)
    tensors_and_binds = inplace_operate_bind(in_tensors, out_tensors, ((2, 1), (3, 2)))
    out_tensors = tensors_and_binds[0]
    attrs[kernel_exec.BINDS] = tensors_and_binds[1]

    if dim_info != "":
        attrs["dim"] = dim_info
    return (*out_tensors, attrs)
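The sub_mean_square and variance computes rely on the identity var = E[x^2] - mean^2, with var_part holding E[x^2] from the first stage. A NumPy check of that identity (illustration only, not part of the source):

import numpy as np

x = np.random.rand(64)                                # float64 keeps the check numerically tight
mean, var_part = x.mean(), (x * x).mean()             # var_part plays the role of E[x^2]
print(np.isclose(var_part - mean * mean, x.var()))    # True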
Python
def bn3_check(data, mean, variance, gamma, beta):
    """check fused_bn3's parameters availability"""
    shape = get_shape(data)
    dtype = data.dtype
    if len(shape) != 5:
        raise RuntimeError("Only support 5D data, "
                           "but get {}!".format(shape))
    utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)
    utils.ops_dtype_check([variance.dtype, mean.dtype, gamma.dtype, beta.dtype],
                          utils.DtypeForDavinci.FLOAT32)
Python
def FusedBn3(data, mean, variance, gamma, beta, eps=1e-3, target=utils.CCE):
    """
    The third part of fused batch norm, calculate the normalized result.

    Read fused_bn1 docs for details.

    Note:
        This part is also the reference implement for fused_batch_norm!

    Args:
        data (tvm.tensor.Tensor): Tensor of type float16 or float32 with "NC1HWC0" format.
        mean (tvm.tensor.Tensor): Tensor of type float32, data's mean.
        variance (tvm.tensor.Tensor): Tensor of type float32, data's variance.
        gamma (tvm.tensor.Tensor): Tensor of type float32 for scaling.
        beta (tvm.tensor.Tensor): Tensor of type float32 for bias.
        eps (float): small float value to avoid dividing zero.

    Returns:
        Tensor as normalized, scaled, shifted data.
    """
    bn3_check(data, mean, variance, gamma, beta)
    dim_info, _ = bn3_set_dim_func(data, mean, variance, gamma, beta, eps)
    attrs = {**DEFAULT_ATTR_MAP_BN3}

    ori_dtype = data.dtype

    # calculate batch norm result
    rsd = Rsqrt(akg.tvm.compute(variance.shape,
                                lambda *i: variance(*i) + akg.tvm.const(eps, dtype=variance.dtype),
                                name="var_eps"), utils.CCE)

    hat_gamma = akg.tvm.compute(gamma.shape, lambda *i: gamma(*i) * rsd(*i),
                                name="hat_gamma", attrs={'no_inline': 1})
    hat_beta = akg.tvm.compute(gamma.shape, lambda *i: beta(*i) - hat_gamma(*i) * mean(*i),
                               name="hat_beta", attrs={'no_inline': 1})

    hat_gamma_bc = akg.lang.ascend.broadcast(hat_gamma, data.shape)
    hat_beta_bc = akg.lang.ascend.broadcast(hat_beta, data.shape)

    data_fp32 = akg.tvm.compute(data.shape, lambda *i: data(*i).astype("float32"), name="data_fp32")
    bn_res_fp32 = akg.tvm.compute(data.shape,
                                  lambda *i: akg.lang.ascend.vmadd(data_fp32(*i), hat_gamma_bc(*i), hat_beta_bc(*i)),
                                  name="bn_res_fp32")
    res = akg.tvm.compute(bn_res_fp32.shape, lambda *i: bn_res_fp32(*i).astype(ori_dtype), name="bn_res")

    if dim_info != "":
        attrs["dim"] = dim_info

    return res, attrs
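A NumPy reference of the folded normalization built above, under the assumption (mine, not stated in the source) that vmadd(a, b, c) computes a * b + c: y = gamma * (x - mean) / sqrt(var + eps) + beta rewritten as x * hat_gamma + hat_beta.

import numpy as np

def bn3_np(x, mean, var, gamma, beta, eps=1e-3):
    hat_gamma = gamma / np.sqrt(var + eps)
    hat_beta = beta - hat_gamma * mean
    return x * hat_gamma + hat_beta      # identical to gamma * (x - mean) / sqrt(var + eps) + beta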
Python
def xlogy_grad_compute(placeholders, shape_max, dtype, rx, ry):
    """
    Do element-wise xlogy_grad compute.

    Args:
        placeholders (Union[list, tuple]): the placeholders of data input
        shape_max (Union[list, tuple]): the shape of broadcast
        dtype (string): the type of data input
        rx (list): the reduction indices of data input with broadcast
        ry (list): the reduction indices for data input with broadcast

    Returns:
        output_y1 (tvm.tensor.Tensor): result of xlogy_grad
        output_y2 (tvm.tensor.Tensor): result of xlogy_grad
    """
    x1_ori = placeholders[0]
    x2_ori = placeholders[1]
    grad_ori = placeholders[2]
    if dtype == "float16":
        x1 = akg.lang.ascend.cast_to(x1_ori, "float32")
        x2 = akg.lang.ascend.cast_to(x2_ori, "float32")
        grad = akg.lang.ascend.cast_to(grad_ori, "float32")
        x1 = akg.lang.ascend.broadcast(x1, shape_max)
        x2 = akg.lang.ascend.broadcast(x2, shape_max)
        grad = akg.lang.ascend.broadcast(grad, shape_max)
    else:
        x1 = akg.lang.ascend.broadcast(x1_ori, shape_max)
        x2 = akg.lang.ascend.broadcast(x2_ori, shape_max)
        grad = akg.lang.ascend.broadcast(grad_ori, shape_max)

    esp_min = tvm.const(1.18e-38, dtype="float32")
    x1_addespmin = akg.lang.ascend.vadds(x1, esp_min)

    if product_is_mini():
        not_zero_x1 = akg.lang.ascend.vmul(x1, Reciprocal(x1_addespmin))
        log_x2 = tvm.compute(
            x2.shape,
            lambda *i: (tvm.log(x2(*i).astype("float16"))).astype("float32"),
            name="log_x2")
    else:
        not_zero_x1 = Divide(x1, x1_addespmin, target="cce")
        log_x2 = akg.lang.ascend.vlog(x2)

    partial_x1 = akg.lang.ascend.vmul(not_zero_x1, log_x2)
    partial_x1g = akg.lang.ascend.vmul(partial_x1, grad)

    partial_x2 = Divide(x1, x2, target="cce") if not product_is_mini() else \
        akg.lang.ascend.vmul(x1, Reciprocal(x2))
    partial_x2g = akg.lang.ascend.vmul(partial_x2, grad)

    output_y1 = akg.lang.ascend.sum(partial_x1g, rx, keepdims=True)
    output_y2 = akg.lang.ascend.sum(partial_x2g, ry, keepdims=True)

    if dtype == "float16":
        output_y1 = akg.lang.ascend.cast_to(output_y1, "float16")
        output_y2 = akg.lang.ascend.cast_to(output_y2, "float16")
    return output_y1, output_y2
Python
def xlogy_grad(x1, x2, grad, target=utils.CCE): """ Returns gradient of xlogy(x1, x2) with respect to x1 and x2. Args: x1 (tvm.tensor.Tensor): Tensor of dtype "float16" or "float32". x2 (tvm.tensor.Tensor): Tensor of dtype "float16" or "float32". grad (tvm.tensor.Tensor): Gradient tensor of dtype "float16" or "float32". Returns: Two tvm.tensor.Tensor as gradients for x1 and x2. """ shape_x1 = get_shape(x1) dtype_x1 = x1.dtype shape_x2 = get_shape(x2) dtype_x2 = x2.dtype shape_grad = get_shape(grad) dtype_grad = grad.dtype if dtype_x1 != dtype_x2 or dtype_x2 != dtype_grad or dtype_grad != dtype_x1: raise RuntimeError( "the type of x1, x2 and grad must be the same," "while dtype_x1 = %s, dtype_x2 = %s, dtype_grad = %s" % (dtype_x1, dtype_x2, dtype_grad)) utils.check_shape(shape_x1) utils.check_shape(shape_x2) utils.check_shape(shape_grad) utils.ops_dtype_check(dtype_x1, utils.DtypeForDavinci.ALL_FLOAT) shape_x1, shape_x2, shape_max_x1x2 = produce_shapes(shape_x1, shape_x2) if len(shape_max_x1x2) < len(shape_grad): raise RuntimeError( "the length of shape_grad can not be longer than the maximum " "length of x1 and x2, while shape_grad = %s, shape_max= %s" % (list(shape_grad), shape_max_x1x2)) shape_grad, _, shape_max = produce_shapes(shape_grad, shape_max_x1x2) for (x, y) in zip(shape_max_x1x2, shape_grad): if x < y: raise RuntimeError( "Don't support this shape. while shape_max = %s, shape_grad " "= %s" % (shape_max_x1x2, list(shape_grad))) rx, ry = broadcast_gradient_args(shape_x1, shape_x2) return xlogy_grad_compute([x1, x2, grad], shape_max, dtype_x1, rx, ry)
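# Illustrative sketch (not part of the op): a NumPy reference for the xlogy gradients,
# assuming x1, x2 and grad already share one shape (the kernel above additionally
# reduce-sums the partials over the broadcast axes given by rx/ry).
import numpy as np

def xlogy_grad_reference(x1, x2, grad):
    """d(x1*log(x2))/dx1 = log(x2) masked where x1 == 0, d(x1*log(x2))/dx2 = x1/x2."""
    dx1 = np.where(x1 != 0.0, np.log(x2), 0.0) * grad
    dx2 = (x1 / x2) * grad
    return dx1, dx2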
Python
def NotEqual(data1, data2, target=utils.CCE):
    """
    Check whether data1 is not equal to data2, element-wise.

    Args:
        data1 (tvm.tensor.Tensor): Tensor.
        data2 (tvm.tensor.Tensor): Tensor.

    Returns:
        tvm.tensor.Tensor. Elements are True where data1 is not equal to data2, False otherwise.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    if target == utils.CCE:
        return _not_equal_ascend(data1, data2)
    else:
        return _not_equal(data1, data2)
Python
def matrix_diag_part(input_diagonal, input_help): """ Calculate the batched diagonal part of a batched tensor. Note: input_help is a tensor with a diagonal element of 1 and other positions of 0, the last two dimensions can be unequal. Args: input_diagonal (tvm.tensor.Tensor): Tensor of float32, float16, int32, int8, uint8. The last two dimensions can be unequal. input_help (tvm.tensor.Tensor): Tensor of float32, float16, int32, int8, uint8, and with a diagonal element of 1 and other positions of 0. Returns: tvm.tensor.Tensor, has the same type as input_diagonal, the shape dims is equal to dims(input_diagonal) - 1. """ dtype_input_diagonal = input_diagonal.dtype dtype_input_help = input_help.dtype utils.elemwise_shape_check(input_help.shape, input_diagonal.shape) if len(input_help.shape) < 2: raise ValueError("Input tensors of rank>=2 are supported!") utils.ops_dtype_check([dtype_input_diagonal, dtype_input_help], [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT8, utils.DtypeForDavinci.INT32, utils.DtypeForDavinci.UINT8]) res = matrix_diag_part_compute(input_diagonal, input_help) return res
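# Illustrative sketch (not part of the op): NumPy extracts the same batched diagonal of
# the last two (possibly unequal) dimensions with np.diagonal, which is what the compute
# above produces, dropping one dimension.
import numpy as np

def matrix_diag_part_reference(input_diagonal):
    return np.diagonal(input_diagonal, axis1=-2, axis2=-1)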
Python
def random_gaussian(size, miu=3, sigma=1):
    """ Generate a random array whose absolute values obey a Gaussian distribution. """
    if sigma <= 0:
        sys.stderr.write("Error: Expect positive sigma for gaussian distribution. but get %f\n" % sigma)
        sys.exit(1)

    rgn = np.random.RandomState(2019)
    ret = rgn.normal(miu, sigma, size)
    for x in np.nditer(ret, op_flags=['readwrite']):
        if np.random.randint(0, 2):
            continue
        x[...] = x * -1
    return ret
Python
def logsoftmax_grad(Y, dY, axis): """ Computes the back propagation gradients by chain rule. Args: Y: Tensor, holds the logsoftmax activation output. dY: Tensor, holds the initial gradients. axis: Integer, on which dimension the softmax is applied. Returns: Tensor, the overall gradients. """ shape = [x.value for x in Y.shape] utils.check_shape(shape) dtype = Y.dtype utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT) if axis == -1: axis = len(shape) + axis if axis >= len(shape): raise RuntimeError("axis should be less than dimension") if axis < -1: raise RuntimeError("negative axis only support -1, please specify the axis in positive value") softmax = akg.topi.exp(Y) dy_sum = akg.lang.ascend.sum(dY, axis=axis) dy_sum_broadcast = akg.lang.ascend.broadcast(dy_sum, shape) mul_result = akg.lang.ascend.vmul(softmax, dy_sum_broadcast) res = akg.lang.ascend.vsub(dY, mul_result) attrs = {"pragma_modshift": 1} return res, attrs
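# Illustrative sketch (not part of the op): a NumPy check of the log-softmax backward rule
# used above. Y holds log-softmax outputs, so softmax = exp(Y) and
# dX = dY - exp(Y) * sum(dY, axis, keepdims=True).
import numpy as np

def logsoftmax_grad_reference(y, dy, axis=-1):
    return dy - np.exp(y) * np.sum(dy, axis=axis, keepdims=True)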
Python
def Less(data1, data2, target=utils.CCE):
    """
    Compute whether data1 is less than data2, element-wise.

    Args:
        data1 (tvm.tensor.Tensor): Tensor of type float16, float32 and int32.
        data2 (tvm.tensor.Tensor): Tensor of type float16, float32 and int32.

    Returns:
        tvm.tensor.Tensor. Elements are True where data1 is less than data2, False otherwise.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    utils.check_shape(data1.shape)
    utils.check_shape(data2.shape)

    # check types
    if target == utils.CCE:
        utils.elemwise_dtype_check(data1.dtype, data2.dtype,
                                   [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])
        # check runtime mode, and change dtype
        if product_is_mini() and data1.dtype != "float16":
            data1 = akg.topi.cast(data1, "float16")
            data2 = akg.topi.cast(data2, "float16")
        if (not product_is_mini()) and data1.dtype == "int32":
            data1 = akg.topi.cast(data1, "float32")
            data2 = akg.topi.cast(data2, "float32")

    res = akg.topi.less(data1, data2)
    return res
Python
def AbsSum(inputs, axis=None, keepdims=False, target=utils.CCE): """ Computes the sum of absolute value of inputs tensor along axis. Args: inputs: The inputs akg.tvm.tensor. axis: An integer, specifies the dimensions to reduce when performing the sum operation. keepdims: A boolean, if True, retains reduced dimensions with length 1, default value is False. Returns: A akg.tvm.Tensor of same type as inputs. Supported Platforms: 'Ascend', 'GPU', 'CPU' """ utils.check_supported_target(target) input_abs = Abs(inputs, target) return Sum(input_abs, axis, keepdims, target=target)
Python
def five2four_tiling_strategy(tensor, c_value=None, expansion=None): """Custom tiling strategy for five2four op.""" strategy = list() if c_value is None: strategy = ct_util.create_template(tensor=tensor, template=ct_util.TileTemplate.NC1HWC0) elif not shape_is_dynamic(tensor): c_value = 16 if c_value < 16 else c_value node_n = ct_util.create_constraint_on_tensor(tensor=tensor, values=1, constraints=ct_util.TileConstraint.FACTOR, tensor_pos=0) node_c1 = ct_util.create_constraint_on_tensor(tensor=tensor, values="FULL", constraints=ct_util.TileConstraint.MAX, tensor_pos=1) node_c0 = ct_util.create_constraint_on_tensor(tensor=tensor, values=c_value, constraints=ct_util.TileConstraint.FACTOR, tensor_pos=4) strategy = node_n + node_c1 + node_c0 if expansion: strategy.append(ct_util.create_constraint_on_tensor(tensor=tensor, values=expansion, constraints=ct_util.TileConstraint.SET_EXPANSION)[0]) if shape_is_dynamic(tensor): # axis should be full tiled due to cast operator strategy.append(ct_util.modify_common_constraints( value=0.85, constraint=ct_util.TileConstraint.SET_MEM_RATIO)) return strategy
Python
def apply_proximal_gradient_descent_run(shape, dtype, attrs=None): """run function for dsl function apply_proximal_gradient_descent.""" scalar_shape = (1,) var_shape, delta_shape = shape, shape alpha_shape, l1_shape, l2_shape = [scalar_shape] * 3 shapes = [var_shape, alpha_shape, l1_shape, l2_shape, delta_shape] dtypes = [dtype] * 5 mod = utils.op_build_test(apply_proximal_gradient_descent, shapes, dtypes, kernel_name='apply_proximal_gradient_descent', attrs=attrs) expect, (var, alpha, l1, l2, delta) = gen_data(dtype, shape) output = utils.mod_launch(mod, (var, alpha, l1, l2, delta), outputs=(0, )) rtol, atol = get_rtol_atol("apply_proximal_gradient_descent", dtype) compare_result = compare_tensor(output, expect, rtol=rtol, atol=atol) inputs = (var, alpha, l1, l2, delta) return inputs, output, expect, compare_result
Python
def gen_data(dtype, shape): """Generate data for testing the op""" # tensors var = random_gaussian(shape).astype(dtype) delta = random_gaussian(shape).astype(dtype) # scalars scalar_shape = (1,) alpha = np.random.random_sample(scalar_shape).astype(dtype) l1 = np.random.randn(*scalar_shape).astype(dtype) l2 = np.random.random_sample(scalar_shape).astype(dtype) input_data = (var, alpha, l1, l2, delta) # expects expect = apply_proximal_gradient_descent_impl(var, alpha, l1, l2, delta) return expect, input_data
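# Illustrative sketch only: apply_proximal_gradient_descent_impl is not shown here, so this is
# one common NumPy formulation of the proximal gradient descent update (as in TensorFlow's
# ApplyProximalGradientDescent), given as an assumption for reference. alpha, l1, l2 are
# plain Python floats in this sketch.
import numpy as np

def proximal_gd_reference(var, alpha, l1, l2, delta):
    prox_v = var - alpha * delta
    if l1 > 0:
        return np.sign(prox_v) * np.maximum(np.abs(prox_v) - alpha * l1, 0.0) / (1.0 + alpha * l2)
    return prox_v / (1.0 + alpha * l2)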
Python
def cos_zero(input): """Computes cosine value of a tensor. :math: `cos(x) = 1-x^2(1/2-x^2(1/4!-x^2(1/6!-x^2(1/8!-1/10!*x^2(...)))))` Args: input (tvm.tensor.Tensor): Tensor of type float16, float32. Returns: tvm.tensor.Tensor of same type and shape as in_data. """ input_square = akg.tvm.compute(input.shape, lambda *i: input(*i) * input(*i), name="input_square") tylor_list_len = len(tylor_list) mid = akg.tvm.compute(input.shape, lambda *index: input_square(*index) * tylor_list[-1], name="mid_res_last") for i, tylor_value in reversed(list(enumerate(tylor_list[:-1]))): name = "mid_res" + str(tylor_list_len - 1 - i) mid = akg.tvm.compute(input.shape, lambda *index: input_square(*index) * (tylor_value - mid(*index)), name=name) res = akg.tvm.compute(input.shape, lambda *index: akg.tvm.const(1.0, dtype="float32") - mid(*index), name="res") return res
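# Illustrative sketch (not part of the op): the same Horner-style evaluation in NumPy.
# `tylor_list` is defined at module level and not shown here, so the coefficients below
# (1/2!, 1/4!, ..., 1/10!) are an assumption matching the formula in the docstring.
import math
import numpy as np

_taylor = [1.0 / math.factorial(2 * k) for k in range(1, 6)]  # 1/2!, 1/4!, ..., 1/10!

def cos_zero_reference(x):
    """Truncated cosine series, accurate for x in roughly [-pi/2, pi/2]."""
    x2 = x * x
    mid = x2 * _taylor[-1]
    for coeff in reversed(_taylor[:-1]):
        mid = x2 * (coeff - mid)
    return 1.0 - mid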
Python
def cos_dsl(input_x):
    """Compute cosine value of a tensor."""
    type_x = input_x.dtype
    check_list = ["float16", "float32"]
    if not (type_x.lower() in check_list):
        raise RuntimeError("cos only support %s while dtype is %s" % (",".join(check_list), type_x))

    utils.check_shape(input_x.shape)

    if type_x == "float16":
        input_x = akg.lang.ascend.cast_to(input_x, "float32")

    pi_multiple = akg.lang.ascend.vmuls(input_x, 1 / pi)
    round_float = akg.lang.ascend.cast_to(akg.lang.ascend.round(pi_multiple), "float32")

    # to adjust x to [-pi/2, pi/2]
    trans_x = akg.lang.ascend.vsub(input_x, akg.lang.ascend.vmuls(round_float, pi))

    res_trans_x = cos_zero(trans_x)
    res_mid = res_trans_x

    # if round is odd, the final result needs to be multiplied by -1.
    # Need to multiply by 1/2 to get the ceil value
    ceil_value = akg.lang.ascend.ceil(akg.lang.ascend.vmuls(round_float, 1 / 2))
    # if odd, ceil*2-round is 1, if even, the value is 0
    sub_value = akg.lang.ascend.vsub(
        akg.lang.ascend.vmuls(ceil_value, akg.tvm.const(2.0, "float32")), round_float)

    tensor_one = akg.lang.ascend.broadcast(akg.tvm.const(1.0, "float32"), input_x.shape)
    odd_tensor = akg.lang.ascend.vsub(tensor_one, sub_value)
    even_tensor = akg.lang.ascend.vsub(odd_tensor, tensor_one)
    odd_even_tensor = akg.lang.ascend.vadd(odd_tensor, even_tensor)

    res = akg.lang.ascend.vmul(res_mid, odd_even_tensor)

    if type_x == "float16":
        res = akg.lang.ascend.cast_to(res, "float16")

    return res
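# Illustrative sketch (not part of the op): the range reduction and parity trick above,
# written in NumPy. x is shifted by round(x/pi)*pi into [-pi/2, pi/2], the series kernel is
# applied, and the sign is flipped whenever round(x/pi) is odd, since cos(x) = (-1)^n * cos(x - n*pi).
import numpy as np

def cos_reference(x):
    n = np.round(x / np.pi)
    reduced = x - n * np.pi
    sign = np.where(np.mod(n, 2.0) == 0.0, 1.0, -1.0)
    return sign * cos_zero_reference(reduced)  # cos_zero_reference: see the sketch above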
Python
def cos(input_x, target=utils.CCE): """Compute cosine value of a tensor.""" dtype = input_x.dtype shape = get_shape(input_x) utils.ops_dtype_check(input_x.dtype, utils.DtypeForDavinci.ALL_FLOAT) utils.check_shape(input_x.shape) if dtype == "float16": input_x = akg.lang.ascend.cast_to(input_x, "float32") res = akg.tvm.compute(shape, lambda *indice: akg.lang.ascend.cos(input_x(*indice)), name="res") # cast the dtype to float16 if dtype == "float16": res = akg.lang.ascend.cast_to(res, "float16") return res, get_attrs()
Python
def GenData(shape, dtype): """ Generate data for testing the op """ class_num = shape[1] labels_int = np.random.randint(low=0, high=shape[1] - 1, size=shape[0]) labels = np.eye(class_num)[labels_int].astype(dtype) logits = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype) testsub = logits - np.max(logits, axis=-1, keepdims=True) input_exp = np.exp(testsub) softmax = input_exp / np.sum(input_exp, axis=-1, keepdims=True) loss_all = labels * np.log(softmax) * -1 loss = np.sum(loss_all, axis=-1) lossNew = np.expand_dims(loss, axis=1) grad = (softmax - labels) * lossNew return labels, logits, loss, grad
Python
def sgd_run(shape, dtype, nesterov=False, dampening=0.0, weight_decay=0.0, lr_mat=0.1, momt_mat=0.9, attrs=None): """run function for dsl function sgd.""" lr = np.full((1,), lr_mat).astype(dtype) momt = np.full((1,), momt_mat).astype(dtype) mod = utils.op_build_test(sgd.sgd, [shape, shape, shape, shape, lr.shape, momt.shape], [dtype, dtype, dtype, dtype, dtype, dtype], [dampening, weight_decay, nesterov], kernel_name='sgd', attrs=attrs) parameters, gradient, accum, stat, parameters_t, accum_t, stat_t, output_para, output_accum, output_stat \ = gen_data(dtype, shape, lr, momt, dampening, weight_decay, nesterov) output_para, output_accum, output_stat = utils.mod_launch(mod, (parameters, gradient, accum, stat, lr, momt), outputs=(0, 2, 3), expect=(parameters_t, accum_t, stat_t)) expects = (parameters_t, accum_t, stat_t) outputs = (output_para, output_accum, output_stat) rtol, atol = get_rtol_atol("sgd", dtype) testcase_result = compare_tensor(outputs, expects, rtol=rtol, atol=atol, equal_nan=True) return (parameters, gradient, accum, stat), (output_para, output_accum, output_stat), \ (parameters_t, accum_t, stat_t), testcase_result
Python
def gen_data(dtype, shape, lr, momt, dampening, weight_decay, nesterov): """Generate data for testing the op""" parameters = random_gaussian(shape, miu=10, sigma=0.3).astype(dtype) gradient = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype) accum = random_gaussian(shape, miu=4, sigma=0.3).astype(dtype) stat = random_gaussian(shape, miu=5, sigma=0.3).astype(dtype) if weight_decay != 0.0: parameters = parameters * 1.0 grad_delta = parameters * weight_decay gradient_new = gradient + grad_delta else: gradient_new = gradient stat_mid = -1.0 * stat stat_act = stat_mid + 1.0 dampening_t = stat_act * dampening # update accum accum_delta = accum * momt[0] gradient_damp = gradient_new * dampening_t accum_t = gradient_new + accum_delta if dampening != 0.0: accum_t = accum_t - gradient_damp # update parameters if nesterov: parameters_delta = gradient_new * lr[0] parameters_delta_2 = accum_t * momt[0] * lr[0] parameters_delta = parameters_delta + parameters_delta_2 parameters_t = parameters - parameters_delta else: parameters_delta = accum_t * lr[0] parameters_t = parameters - parameters_delta # update stat stat_t = stat_act * 0.0 output_para = np.full(shape, np.nan, dtype) output_accum = np.full(shape, np.nan, dtype) output_stat = np.full(shape, np.nan, dtype) return parameters, gradient, accum, stat, parameters_t, accum_t, stat_t, output_para, output_accum, output_stat
Python
def least_common_multiple(x, y):
    """get the least common multiple of x and y."""
    if not isinstance(x, int) or not isinstance(y, int):
        raise TypeError("Input of least common multiple should be integer")
    return x * y // greatest_common_divisor(x, y)
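# Illustrative usage sketch: the result can be cross-checked against the standard library
# (math.gcd on Python 3.5+, math.lcm on Python 3.9+).
import math

assert least_common_multiple(4, 6) == 12
assert least_common_multiple(4, 6) == math.lcm(4, 6)  # Python 3.9+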
Python
def concat_ad(data, axis, wrt_index=0, target="cce"): """ autodiff of concat with one or more input data. Args: data (list[akg.tvm.tensor.Tensor]): input tensors. axis (int): concat axis wrt_index (int): derivative with respect to index (must be less than len(data)). Returns: concatenation result with the given data and axis. """ output = Concat(data, axis, target=target) head = akg.tvm.placeholder(output.shape, output.dtype, name="head") jacs = list(akg.differentiate(output, [data[wrt_index]], head)) return jacs[0], head
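# Illustrative sketch (not part of the op): the adjoint of concat with respect to input
# `wrt_index` is simply the matching slice of `head` along `axis`, which is what the
# differentiation above should produce.
import numpy as np

def concat_grad_reference(head, input_shapes, axis, wrt_index=0):
    start = sum(s[axis] for s in input_shapes[:wrt_index])
    stop = start + input_shapes[wrt_index][axis]
    return np.take(head, np.arange(start, stop), axis=axis)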
Python
def relu6_grad(dy, features):
    """
    Computes Gradients of Rectified Linear 6.

    Args:
        dy (tvm.tensor.Tensor): Tensor of type float16, float32, gradients backpropagated to the Relu6 op.
        features (tvm.tensor.Tensor): Tensor of type float16, float32, inputs that were passed to the Relu6 op, or its outputs.

    Returns:
        tvm.tensor.Tensor, has same type and shape as features.
    """
    check_list = ["float16", "float32"]
    dtype = features.dtype
    if not dtype in check_list:
        raise RuntimeError("relu6_grad only support %s while dtype is %s" % (",".join(check_list), dtype))

    shape = [x.value for x in features.shape]
    utils.check_shape(shape)

    def grad_dsl():
        zeros = 0
        res0 = akg.tvm.compute(shape,
                               lambda *i: akg.tvm.if_then_else(
                                   features(*i) >= akg.tvm.const(zeros, dtype),
                                   features(*i), akg.tvm.const(zeros, dtype)
                               ))
        res6 = akg.tvm.compute(shape,
                               lambda *i: akg.tvm.if_then_else(
                                   features(*i) >= akg.tvm.const(6, dtype),
                                   akg.tvm.const(zeros, dtype), res0(*i)
                               ))
        res = akg.tvm.compute(shape,
                              lambda *i: akg.tvm.if_then_else(
                                  res6(*i) == akg.tvm.const(zeros, dtype),
                                  akg.tvm.const(zeros, dtype), dy(*i)
                              ))
        return res

    return grad_dsl()
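# Illustrative sketch (not part of the op): the same ReLU6 gradient rule in NumPy.
# The gradient passes dy through only where 0 < features < 6 and is zero elsewhere.
import numpy as np

def relu6_grad_reference(dy, features):
    return np.where((features > 0) & (features < 6), dy, np.zeros_like(dy))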
Python
def Argmin(data, axis, target=utils.CCE):
    """
    Calculate argmin value on specific axis.

    Args:
        data (tvm.tensor.Tensor): Target data.
        axis (int): An int number specifying the axis on which argmin is calculated.

    Returns:
        tvm.tensor.Tensor. As minimum number indexes.

    Supported Platforms:
        'Ascend'
    """
    res, attrs = common(data, axis, "min")
    return res, attrs
Python
def kldiv_loss(inputs, outputs, reduction='none'): """ Computes Kullback-Leibler divergence loss between outputs and inputs. In default, loss = outputs*(log(outputs) - log(inputs)), the way using to reduce loss is defined in reduction Args: inputs (tvm.tensor.Tensor): Tensor with type float16, float32 outputs (tvm.tensor.Tensor): Tensor with same type as inputs. reduction (str): uses one of ['sum', 'mean', 'batchmean'] Returns: Tensor with same type as input tensors. """ inputs_dtype = inputs.dtype target_dtype = outputs.dtype utils.ops_dtype_check([inputs_dtype, target_dtype], utils.DtypeForDavinci.ALL_FLOAT) if get_const_tuple(outputs.shape) != get_const_tuple(inputs.shape): raise RuntimeError( "Please ensure inputs have the same size.", outputs.shape, inputs.shape) inputs_dtype_old = inputs_dtype if product_is_mini() and inputs_dtype == 'float32': inputs = akg.topi.cast(inputs, "float16") outputs = akg.topi.cast(outputs, "float16") inputs_dtype = "float16" log_inputs = akg.topi.log(inputs) log_target = akg.topi.log(outputs) loss = akg.topi.subtract(log_target, log_inputs) loss = akg.topi.multiply(outputs, loss) if reduction == 'sum': loss = akg.topi.sum(loss) if reduction == 'mean': loss = akg.topi.sum(loss) deno = 1.0 for num in inputs.shape: deno = deno * num deno = akg.topi.cast(deno, dtype=inputs_dtype) loss = akg.topi.divide(loss, deno) if reduction == 'batchmean': reduce_axis = tuple(numpy.arange(1, len(inputs.shape))) loss = akg.topi.sum(loss, axis=reduce_axis, keepdims=False) deno = 1.0 for num in inputs.shape[1:]: deno = deno * num deno = akg.topi.cast(deno, dtype=inputs_dtype) loss = akg.topi.divide(loss, deno) if product_is_mini() and inputs_dtype_old == 'float32': loss = akg.topi.cast(loss, inputs_dtype_old) return loss
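# Illustrative sketch (not part of the op): a NumPy reference that mirrors the reduction
# behaviour implemented above. Note that 'batchmean' here averages over the non-batch
# elements of each sample, which differs from e.g. PyTorch's definition of batchmean.
import numpy as np

def kldiv_loss_reference(inputs, outputs, reduction='none'):
    loss = outputs * (np.log(outputs) - np.log(inputs))
    if reduction == 'sum':
        return loss.sum()
    if reduction == 'mean':
        return loss.sum() / loss.size
    if reduction == 'batchmean':
        non_batch = tuple(range(1, loss.ndim))
        return loss.sum(axis=non_batch) / np.prod(loss.shape[1:])
    return loss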
Python
def smooth_l1_loss_grad(dloss, prediction, tar, anchor_samples, sigma, anchor_sample_correct): """ do backprop for smooth L1 loss (Huber loss) Args: dloss (tvm.tensor.Tensor): Tensor [x,y], derivative of loss. prediction (tvm.tensor.Tensor): Tensor [x,y,z], output of the forward pass. tar (tvm.tensor.Tensor): Tensor [x,y,z], ground truth. anchor_samples (tvm.tensor.Tensor): Tensor [x,y], == anchor_sample_correct indicates correct classification, otherwise no meaning. sigma (float): Constant parameter. anchor_sample_correct (int): Constant parameter. Returns: dpredirection (tvm.tensor.Tensor): output tensor [x,y,z] """ if len(dloss.shape) != len(anchor_samples.shape): raise RuntimeError("anchor_samples shape should equal to dloss shape!") if len(prediction.shape) != len(tar.shape): raise RuntimeError("prediction shape should equal to tar shape!") if (len(dloss.shape) + 1) != len(prediction.shape): raise RuntimeError("prediction shape should be dloss shape + 1!") out_shape = get_shape(prediction) original_dtype = dloss.dtype utils.ops_dtype_check(original_dtype, utils.DtypeForDavinci.ALL_FLOAT) dim_info, _ = smooth_l1_loss_grad_set_dim_func( dloss, prediction, tar, anchor_samples, sigma, anchor_sample_correct) attrs = {DIM: dim_info} if product_is_mini(): dtype = "float16" else: dtype = original_dtype # unify the data type of tensors if dloss.dtype != dtype: dloss = akg.topi.cast(dloss, dtype) if prediction.dtype != dtype: prediction = akg.topi.cast(prediction, dtype) if tar.dtype != dtype: tar = akg.topi.cast(tar, dtype) if anchor_samples.dtype != dtype: anchor_samples = akg.topi.cast(anchor_samples, dtype) def eltwise_compute_func(_prediction, _target, _dloss, _anchor_sample, dtype): _diff = akg.tvm.expr.Sub(_prediction, _target) _first_branch = akg.tvm.expr.Mul(_diff, akg.tvm.const(sigma * sigma, dtype)) _second_branch = akg.tvm.expr.Select( akg.tvm.const(0, dtype) < _diff, akg.tvm.const(1, dtype), akg.tvm.const(-1, dtype)) _abs_diff = akg.tvm.expr.Mul(_second_branch, _diff) _derivative = akg.tvm.expr.Select(_abs_diff <= akg.tvm.const( 1.0 / (sigma * sigma), dtype), _first_branch, _second_branch) _mult_dloss = akg.tvm.expr.Mul(_derivative, _dloss) _output = akg.tvm.expr.Select( _anchor_sample == anchor_sample_correct, akg.tvm.const(0, dtype), _mult_dloss) return _output dprediction = akg.tvm.compute(out_shape, lambda *i: eltwise_compute_func( prediction(*i), tar(*i), dloss(*i[:-1]), anchor_samples(*i[:-1]), dtype)) if dprediction.dtype.lower() != original_dtype: dprediction = akg.topi.cast(dprediction, original_dtype) return dprediction, attrs
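# Illustrative sketch (not part of the op): the element-wise Huber-loss gradient rule used
# above, in NumPy, with dloss/anchor_samples of shape [x, y] broadcast over the last axis of
# prediction/target ([x, y, z]) and the gradient zeroed for anchors marked as correct.
import numpy as np

def smooth_l1_loss_grad_reference(dloss, prediction, target, anchor_samples,
                                  sigma, anchor_sample_correct):
    diff = prediction - target
    derivative = np.where(np.abs(diff) <= 1.0 / (sigma * sigma),
                          diff * (sigma * sigma), np.sign(diff))
    dpred = derivative * dloss[..., np.newaxis]
    return np.where(anchor_samples[..., np.newaxis] == anchor_sample_correct,
                    np.zeros_like(dpred), dpred)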
Python
def smooth_l1_loss_grad_get_dim(shape): """ get dim attr for smooth L1 loss grad Args: shape: the shape of prediction tensor (e.g. [8, 4718, 4]) Returns: dim string for akg.op.build(attrs=...) """ # example shape: [8, 4718, 4] # cut dim: ((1,1), (1024,1024)) tensor_size = 1 for i in shape[:-1]: tensor_size *= i # if tensor_size >= threshold, cut ub_size = 256 * 1024 # estimated maximum number of data copies in UB num_data_copies = 32 data_size = 4 # do not cut the last dim max_tensor_size = int(ub_size / data_size / num_data_copies / shape[-1]) if tensor_size > max_tensor_size: # find the largest divisor of tensor_size to be the tile size # currently the dim size must be divisible by tile size tile_size = 1 for i in range(max_tensor_size, 1, -1): if tensor_size % i == 0: tile_size = i break # generate setdim string info = dim.Dim() # do not cut last dim for i in range(0, len(shape) - 2): info.setdim(index=0, axis=i, tilel1=1, tilel0=1) # cut -2 dim info.setdim(index=0, axis=len(shape) - 2, tilel1=tile_size, tilel0=tile_size) return str(info) return ''
Python
def intrin_col2im(input_shape, output_shape, kernel, stride, pad, dtype):
    """
    Compute col2im via cce col2im intrin function call directly

    Args:
        input_shape: the shape of the image
        output_shape: the shape of the result of im2col given the input image
        kernel: kernel sizes for im2col
        stride: stride sizes for im2col
        pad: padding sizes for im2col, including padding top, bottom, left, and right
        dtype: type of the data

    Return:
        cce intrin function call for col2im
    """
    input_w, input_h = input_shape
    output_w, output_h = output_shape
    pad_left, pad_right, pad_top, pad_bottom = pad

    w_idx_kernel = 0
    h_idx_kernel = 0
    w_idx = (-pad_left) & 0xffff
    h_idx = (-pad_top) & 0xffff
    c1_idx = 0
    stride_w, stride_h = stride
    kernel_w, kernel_h = kernel
    dilation_w = dilation_h = 1
    jump_offset = 0
    repeat_mode = 0
    repeat_time = (output_w * output_h + 15) // 16

    input_b = 1
    input_c1 = 1
    input_h_tile = 1
    input_w_tile = 1
    input_c0 = 16

    input_shape = (input_b, input_c1, input_h_tile, input_w_tile,
                   kernel_w, kernel_h, output_w, output_h, input_c0)
    input_data = akg.tvm.placeholder(input_shape, dtype=dtype)

    result = akg.tvm.compute((input_w, input_h, input_c0),
                             lambda h, w, c0: input_data[0, 0, 0, 0,
                                                         h // kernel_h, w // kernel_w,
                                                         h % kernel_h, w % kernel_w, c0],
                             name='col2im_intrinsic')

    input_data_buff = akg.tvm.decl_buffer(input_data.shape, input_data.dtype,
                                          name="input_data_buff",
                                          offset_factor=1, scope="local.UB")

    result_buff = akg.tvm.decl_buffer(result.shape, result.dtype,
                                      name="result_buff",
                                      offset_factor=1, scope="local.UB")

    def pack_args(sp):
        if len(sp) != 20:
            raise RuntimeError("20 args are expected to pack but got {}"
                               "".format(len(sp)))
        # fcol2img = (sp[0] & 0xffff) << 0 | (sp[1] & 0xffff) << 16
        #          | (sp[2] & 0xff) << 32 | (sp[3] & 0xff) << 40
        #          | (sp[4] & 0xff) << 48 | (sp[5] & 0xff) << 56
        # Xm = (sp[6] & 0xff) << 16 | (sp[7] & 0xff) << 24
        #    | (sp[8] & 0xffff) << 32 | (sp[9] & 0xffff) << 48
        #    | (sp[10] & 0xfff) << 0
        # Xt = (sp[11] & 63) << 0 | (sp[12] & 63) << 6
        #    | (sp[13] & 0xff) << 12 | (sp[14] & 0xff) << 20
        #    | (sp[15] & 0xff) << 28 | (sp[16] & 0xff) << 36
        #    | (sp[17] & 0xff) << 44 | (sp[18] & 1) << 52 | (sp[19] & 0xff) << 56
        fcol2img = akg.tvm.const(sp[0], 'uint64') + akg.tvm.const(sp[1] * 2**16, 'uint64') \
            + akg.tvm.const(sp[2] * 2**32, 'uint64') + akg.tvm.const(sp[3] * 2**40, 'uint64') \
            + akg.tvm.const(sp[4] * 2**48, 'uint64') + akg.tvm.const(sp[5] * 2**56, 'uint64')

        xm = akg.tvm.const(sp[6] * 2**16, 'uint64') + akg.tvm.const(sp[7] * 2**24, 'uint64') \
            + akg.tvm.const(sp[8] * 2**32, 'uint64') + akg.tvm.const(sp[9] * 2**48, 'uint64') \
            + akg.tvm.const(sp[10], 'uint64')

        xt = akg.tvm.const(sp[11], 'uint64') + akg.tvm.const(sp[12] * 2**6, 'uint64') \
            + akg.tvm.const(sp[13] * 2**12, 'uint64') + akg.tvm.const(sp[14] * 2**20, 'uint64') \
            + akg.tvm.const(sp[15] * 2**28, 'uint64') + akg.tvm.const(sp[16] * 2**36, 'uint64') \
            + akg.tvm.const(sp[17] * 2**44, 'uint64') + akg.tvm.const(sp[18] * 2**52, 'uint64') \
            + akg.tvm.const(sp[19] * 2**56, 'uint64')

        return (fcol2img, xm, xt)

    def intrin_func(ins, outs):
        sp = [input_w, input_h, pad_left, pad_right, pad_top, pad_bottom,      # fmatrix
              w_idx_kernel, h_idx_kernel, w_idx, h_idx, c1_idx,                # xm
              stride_w, stride_h, kernel_w, kernel_h, dilation_w, dilation_h,
              jump_offset, repeat_mode, repeat_time]

        aa = ins[0]
        bb = outs[0]
        ib = akg.tvm.ir_builder.create()
        fcol2img, xm, xt = pack_args(sp)
        ib.emit(akg.tvm.call_extern(dtype, "set_fcol2img", fcol2img))
        ib.emit(akg.tvm.call_extern(dtype, "vector_dup", bb.access_ptr("w"),
                                    0, (input_w * input_h * 16 + 63) // 64, 1, 1, 8, 8))

        for kh in range(kernel_h):
            for kw in range(kernel_w):
                sp[6] = kw
                sp[7] = kh
                _, xm, xt = pack_args(sp)
                offset = (kh * kernel_h + kw) * output_h * output_w * 16
                ib.emit(akg.tvm.call_extern(dtype, "col2img", bb.access_ptr("rw"),
                                            aa.access_ptr("r", offset=offset), xm, xt))
        return ib.get()

    with akg.tvm.build_config(offset_factor=1):
        return akg.tvm.decl_tensor_intrin(result.op, intrin_func,
                                          binds={input_data: input_data_buff, result: result_buff})
Python
def bessel_i0e(x, target=utils.CCE): """ The modified Bessel i0e function. ..math:: `I0e(x) = (e^{-|x|}) * (1 + ( (x/2) / (1!) )^2 + ((x/2)^2 / (2!))^2 + ... + ((x/2)^n / (n!)) ^2)` Args: x (tvm.tensor.Tensor): Tensor of type float16, float32. Returns: tvm.tensor.Tensor. The modified Bessel i0e function of x element-wise. Has the same type as x. """ # check shape utils.check_shape(x) # check input tensor data_type utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT) res = _bessel_i0e_compute(x) return res
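# Illustrative sketch (not part of the op): the series from the docstring evaluated in NumPy,
# truncated to a fixed number of terms; scipy.special.i0e can serve as a cross-check where
# SciPy is available.
import math
import numpy as np

def bessel_i0e_reference(x, terms=20):
    """I0e(x) = exp(-|x|) * sum_k ((x/2)^k / k!)^2, truncated to `terms` terms."""
    acc = np.zeros_like(np.asarray(x, dtype=np.float64))
    for k in range(terms):
        acc += (np.power(x / 2.0, k) / math.factorial(k)) ** 2
    return np.exp(-np.abs(x)) * acc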
Python
def bernoulli_logprob_ad(head, x, probs):
    """
    An example of differentiating bernoulli.logprob in all inputs and parameters

    Args:
        head: The adjoint of the output, in other words, some tensors, by which the Jacobians
            will be multiplied
        x: input, tensor of 0 or 1
        probs: probabilities of random variables taking values 1
    """
    mod = bernoulli.bernoulli(probs).log_prob(x)
    auto_diff_outs = list(akg.differentiate(mod, [x, probs], head))
    return auto_diff_outs
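# Illustrative sketch only: the bernoulli distribution class is not shown here, so assuming
# the usual Bernoulli log-probability log p(x) = x*log(probs) + (1-x)*log(1-probs), these are
# the hand-derived adjoints the auto-diff above should reproduce.
import numpy as np

def bernoulli_logprob_grads_reference(head, x, probs):
    dx = head * (np.log(probs) - np.log(1.0 - probs))         # d log p / dx
    dprobs = head * (x / probs - (1.0 - x) / (1.0 - probs))   # d log p / dprobs
    return dx, dprobs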
Python
def batch_norm_tiling_strategy_dynamic(tensor): """Custom tiling strategy for fused_batch_norm op with dynamic shape.""" strategy = list() forbid_iso = False full_tile_reduce = False multicore_axis = 0 c0_axis = 4 w_axis = 3 h_axis = 2 c1_axis = 1 for i, _ in enumerate(tensor.shape): if i in [w_axis, c0_axis]: strategy += ct_util.create_constraint_on_tensor(tensor=tensor, values="FULL", constraints=ct_util.TileConstraint.MAX, tensor_pos=i) elif i == h_axis and full_tile_reduce: strategy += ct_util.create_constraint_on_tensor(tensor=tensor, values="FULL", constraints=ct_util.TileConstraint.MAX, tensor_pos=i) elif i == c1_axis and full_tile_reduce: strategy += ct_util.create_constraint_on_tensor(tensor=tensor, values=4, constraints=ct_util.TileConstraint.FACTOR, tensor_pos=i) elif i == multicore_axis: strategy += ct_util.create_constraint_on_tensor(tensor=tensor, values=1, constraints=ct_util.TileConstraint.FACTOR, tensor_pos=i) elif forbid_iso: strategy += ct_util.create_constraint_on_tensor(tensor=tensor, values=1, constraints=ct_util.TileConstraint.FORBID_ISOLATE, tensor_pos=i) return strategy
Python
def batch_norm_tiling_strategy(tensor, tensor_format): """Custom tiling strategy for fused_batch_norm op""" if tensor_format == "DefaultFormat": return list() if tensor_format == "NC1HWC0": multi_core_axis = 1 c0_axis = 4 dim = 5 elif tensor_format == "NHWC": multi_core_axis = 3 c0_axis = None dim = 4 else: multi_core_axis = 1 c0_axis = None dim = 4 strategy = list() if dim != 4 or get_shape(tensor)[multi_core_axis] != 1: strategy += ct_util.create_constraint_on_tensor( tensor=tensor, values=1, constraints=ct_util.TileConstraint.FACTOR, tensor_pos=multi_core_axis) if c0_axis: strategy += ct_util.create_constraint_on_tensor( tensor=tensor, values="FULL", constraints=ct_util.TileConstraint.MAX, tensor_pos=c0_axis) for i in range(dim): strategy += ct_util.create_constraint_on_tensor( tensor=tensor, values=1, constraints=ct_util.TileConstraint.FORBID_ISOLATE, tensor_pos=i) strategy.append(ct_util.modify_common_constraints( value=0.7, constraint=ct_util.TileConstraint.SET_MEM_RATIO)) return strategy
Python
def check_inputs(data, gamma, beta, moving_mean, moving_variance, data_format, axis): """check inputs availability for fused_batch_norm op and get params""" if any(data.dtype != t.dtype for t in [gamma, beta, moving_mean, moving_variance]): raise AssertionError("All input tensors should have same dtype!") utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.ALL_FLOAT) dataformat_list = ["NHWC", "NC1HWC0", "NCHW", "DefaultFormat"] if data_format not in dataformat_list: raise AssertionError("fused_batch_norm only support %s while data_format " "is %s" % (",".join(dataformat_list), data_format)) shape = get_shape(data) in_rank = len(shape) is_special5d = (data_format == "NC1HWC0") if in_rank <= 1: raise AssertionError("Do not support 1D data.") if data_format == "DefaultFormat": if not isinstance(axis, int): raise RuntimeError("axis should be instance of int but {}" "".format(axis)) if axis not in range(-in_rank, in_rank): raise AssertionError("axis must be in range [%d, %d)" "" % (-in_rank, in_rank)) if axis < 0: axis = in_rank + axis elif data_format == "NHWC": if in_rank != 4: raise AssertionError("Data shape {} mismatch data_format \"NHWC\"." "".format(data.shape)) axis = 3 elif data_format == "NCHW": if in_rank != 4: raise AssertionError("Data shape {} mismatch data_format \"NCHW\"." "".format(data.shape)) axis = 1 else: axis = 1 if is_special5d: def is_all_1_but_axis_equal(shape1, shape2, axis): if not isinstance(axis, (list, tuple)): axis = (axis,) for i, _ in enumerate(shape2): if i not in axis: if isinstance(shape1[i], akg.tvm.expr.Var) or int(shape1[i]) != 1: return False else: if isinstance(shape1[i], akg.tvm.expr.Var): if shape1[i] != shape2[i]: return False else: if int(shape1[i]) != int(shape2[i]): return False return True if len(data.shape) != 5: raise AssertionError("data shape {} mismatch data_format " "\"NC1HWC0\".".format(data.shape)) if len(gamma.shape) != 5 \ or not is_all_1_but_axis_equal(gamma.shape, shape, (1, 4)): raise AssertionError("gamma mismatch NC1HWC0 data (while gamma shape " "is {}, input shape is {})!!!".format( gamma.shape, data.shape)) if len(beta.shape) != 5 \ or not is_all_1_but_axis_equal(beta.shape, shape, (1, 4)): raise AssertionError("beta mismatch NC1HWC0 data (while beta shape " "is {}, input shape is {})!!!".format( beta.shape, data.shape)) if len(moving_mean.shape) != 5 \ or not is_all_1_but_axis_equal(moving_mean.shape, shape, (1, 4)): raise AssertionError("moving_mean mismatch NC1HWC0 data (while " "moving_mean shape is {}, input shape is " "{})!!!".format(beta.shape, data.shape)) if len(moving_variance.shape) != 5 \ or not is_all_1_but_axis_equal(moving_variance.shape, shape, (1, 4)): raise AssertionError("moving_variance mismatch NC1HWC0 data (while " "moving_variance shape is {}, input shape is " "{})!!!".format(beta.shape, data.shape)) else: if len(gamma.shape) != 1 or (gamma.shape[0].value != shape[axis]): raise AssertionError("gamma mismatch the channel axis(while gamma " "shape is {}, input shape is {}, and axis is " "{})!!!".format(gamma.shape, data.shape, axis)) if len(beta.shape) != 1 or (beta.shape[0].value != shape[axis]): raise AssertionError("beta mismatch the channel axis(while beta shape" " is {}, input shape is {}, and axis is " "{})!!!".format(beta.shape, data.shape, axis)) if len(moving_mean.shape) != 1 \ or (moving_mean.shape[0].value != shape[axis]): raise AssertionError("moving_mean mismatch the channel axis(while " "moving_mean shape is {}, input shape is {}, " "and axis is {})!!!".format( moving_mean.shape, 
data.shape, axis)) if len(moving_variance.shape) != 1 \ or (moving_variance.shape[0].value != shape[axis]): raise AssertionError("moving_variance mismatch the channel axis(while" " moving_variance shape is {}, input shape is " "{}, and axis is {})!!!".format(moving_variance.shape, data.shape, axis)) if is_special5d: axes = [3, 2, 0] mid_shape = [1, shape[1], 1, 1, shape[4]] else: axes = [i for i in range(in_rank - 1, -1, -1) if i != axis] mid_shape = [1] * in_rank mid_shape[axis] = shape[axis] out_params = { "is_special5d": is_special5d, "axis": axis, "axes": tuple(axes), "mid_shape": mid_shape } return out_params
Python
def sum_data(data, axes, keepdims, single_sum=False): """different solutions for sum multi axes""" if single_sum: data = akg.topi.sum(data, axis=axes, keepdims=keepdims) else: data = mul_axis_sum(data, axes, keepdims) return data
Python
def SparseSoftmaxCrossEntropyWithLogitsAd(labels, logits, reduction='mean', grad_scale=1.0, target=utils.CCE):
    """
    Compute gradient for sparse_softmax_cross_entropy_with_logits operator using automatic differentiation.

    Supported Platforms:
        'Ascend'
    """
    attr_map = {}

    def custom_softmax_cross_entropy_with_logits_fdiff(out, inputs, grad, attrs, new_pld_array):
        strategy, _, backprop = sparse_softmax_cross_entropy_with_logits_impl(inputs[1], inputs[0],
                                                                              reduction=reduction,
                                                                              scale=grad_scale)
        if strategy:
            attr_map["custom_tiling"] = strategy
        return [backprop]

    l_value, _ = SparseSoftmaxCrossEntropyWithLogits(labels, logits, reduction)
    head = akg.tvm.compute(l_value.shape, lambda *i: akg.tvm.const(1.0, l_value.dtype), name='head')
    [dl_dlogits] = akg.differentiate(l_value, [logits], head, None, None,
                                     override={l_value: ([logits, labels],
                                                         custom_softmax_cross_entropy_with_logits_fdiff)})
    return dl_dlogits, attr_map
Python
def save_op_output_dtype(func, *args):
    """
    Save the op's output dtype on the first call to the template api.

    Note:
        The dtype is saved here; before auto scheduling it is read back to
        convert the result tensor to this dtype, after which it is reset to None.
    """
    global op_output_dtype
    if op_output_dtype is None:
        if func.__name__ == "broadcast":
            if isinstance(args[0], int):
                output_dtype = "int32"
            elif isinstance(args[0], float):
                output_dtype = "float16"
            else:
                output_dtype = args[0].dtype
        elif func.__name__ == "concat":
            output_dtype = args[0][0].dtype
        else:
            output_dtype = args[0].dtype

        op_output_dtype = output_dtype
Python
def judge_var(num):
    """Judge whether num is a native Python numeric constant, an akg.tvm const, or an akg.tvm var."""
    var_dict = {"python_const": [int, float],
                "tvm_const": [akg.tvm.expr.IntImm, akg.tvm.expr.UIntImm, akg.tvm.expr.FloatImm],
                "tvm_var": [akg.tvm.expr.Var]}
    num_type = type(num)
    for i in var_dict:
        if num_type in var_dict[i]:
            return i
    raise RuntimeError("Input var Error")
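A minimal usage sketch showing the three labels judge_var can return; the literal values below are only illustrative.

import akg

print(judge_var(2))                           # "python_const"
print(judge_var(akg.tvm.const(2, "int32")))   # "tvm_const"  (an IntImm)
print(judge_var(akg.tvm.var("n")))            # "tvm_var"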
Python
def shape_to_list(shape): """translate akg.tvm.shape to list type in python""" tmp = [] for i in shape: if isinstance(i, akg.tvm.expr.Var): tmp.append(i) else: tmp.append(i.value) return tmp
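A small sketch of how shape_to_list behaves on a partially dynamic shape; the placeholder below is only for illustration.

import akg

n = akg.tvm.var("n")
t = akg.tvm.placeholder((n, 16, 16), dtype="float16", name="t")
# Var axes are kept as-is, constant axes become plain Python ints: [n, 16, 16]
print(shape_to_list(t.shape))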
Python
def Tile(data, multiples, target=utils.CCE): """ Repeats the data in the specified dimensions according to the multiples. Args: data (tvm.tensor.Tensor): Tensor of type float16, float32. multiples (Union[list, tuple]): Elements must be int. The number of repetitions. Returns: tvm.tensor.Tensor, has the same dtype as data. Supported Platforms: 'Ascend', 'GPU', 'CPU' """ utils.check_supported_target(target) shape = [x.value for x in data.shape] dtype = data.dtype utils.check_shape(shape) utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_TYPES) utils.check_int_list(multiples, "multiples") output = akg.topi.tile(data, multiples) return output
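Since the op delegates to akg.topi.tile, a NumPy np.tile call with the same multiples can serve as the host-side expectation when testing; this is a sketch, not part of the op, and the shapes are illustrative.

import numpy as np

data = np.random.rand(2, 3).astype(np.float32)
multiples = (2, 2)
expect = np.tile(data, multiples)   # shape (4, 6)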
Python
def triplet_loss_naive_grad(anchor_output, positive_output, negative_output, grad, margin=1.0, target="cce"): """ Calculate gradient for triplet loss. Args: anchor_output: Tensor. The training data. positive_output: Tensor. Positive samples. negative_output: Tensor. Negative samples. grad: Tensor. margin: Float. Margin for triplet. Returns: Tensor. """ fwd = triplet_loss_naive(anchor_output, positive_output, negative_output, margin) d_pos = (anchor_output - positive_output) d_neg = (anchor_output - negative_output) an_shape = get_shape(anchor_output) zero = akg.tvm.const(0, dtype=anchor_output.dtype) d_anchor = akg.tvm.compute(an_shape, lambda i, j: grad[i] * (akg.tvm.expr.Select(fwd[i] == 0, zero, d_pos[i, j] * 2 - 2 * d_neg[i, j])), name="d_anchor") d_positive = akg.tvm.compute(an_shape, lambda i, j: grad[i] * (akg.tvm.expr.Select(fwd[i] == 0, zero, -d_pos[i, j] * 2)), name="d_positive") d_negative = akg.tvm.compute(an_shape, lambda i, j: grad[i] * (akg.tvm.expr.Select(fwd[i] == 0, zero, 2 * d_neg[i, j])), name="d_negative") return d_anchor, d_positive, d_negative
Python
def GenData(shape, dtype): """Generate data for testing the op.""" class_num = shape[1] labels_int = np.random.randint(low=0, high=shape[1] - 1, size=shape[0]) labels = np.eye(class_num)[labels_int].astype(dtype) logits = np.random.random(shape).astype(dtype) logits = np.where(logits < 0.001, 0.001, logits) head_np = np.random.uniform(low=0, high=1.0, size=shape[0]).astype(dtype) head_np = head_np.reshape([shape[0], 1]) loss_ad_temp = labels / logits loss_ad_temp = loss_ad_temp * head_np loss_ad = loss_ad_temp * -1 return labels, logits, head_np, loss_ad
Python
def dense(data, w, bias_data=None, bias=False, target="cce"):
    """
    Computes data * w if bias is False, else data * w + bias_data if bias is True.

    Args:
        data(akg.tvm.Tensor): Should be a 2D tensor of type float16 with shape(batch, in_dim).
        w(akg.tvm.Tensor): Should be a 2D tensor of same type as data with shape(out_dim, in_dim).
        bias_data(None, akg.tvm.Tensor): Could be None(if bias is False) or
                                         a 1D akg.tvm.Tensor of same type as data with shape(out_dim,).
        bias(bool): Specifies whether a bias vector will be used or not.

    Returns:
        2D akg.tvm.Tensor of same type as data with shape(batch, out_dim).
    """
    check_list = ["float16"]
    dtype = data.dtype
    if dtype not in check_list:
        raise TypeError("dense only support %s while dtype is %s" % (",".join(check_list), dtype))

    d_shape = [x.value for x in data.shape]
    batch = d_shape[0]
    in_dim = d_shape[1]
    w_shape = [x.value for x in w.shape]
    if bias:
        out_dim = [x.value for x in bias_data.shape][0]
    else:
        out_dim = w_shape[0]

    k = akg.tvm.reduce_axis((0, in_dim), name='k')
    res = akg.tvm.compute((batch, out_dim), lambda i, j: akg.tvm.sum(data[i, k] * w[j, k], axis=k), name='M')
    if bias:
        res = akg.tvm.compute((batch, out_dim), lambda i, j: res[i, j] + bias_data[j])
    return res
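Because the reduction is sum_k data[i, k] * w[j, k], a NumPy expectation for testing is simply data @ w.T plus the optional bias. This is a host-side sketch that assumes accumulation in float32 before casting back.

import numpy as np

def dense_expect(data, w, bias_data=None):
    """Host-side reference: data @ w.T (+ bias_data), with w laid out as (out_dim, in_dim)."""
    res = np.dot(data.astype(np.float32), w.astype(np.float32).T)
    if bias_data is not None:
        res = res + bias_data.astype(np.float32)
    return res.astype(data.dtype)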
Python
def apply_rms_prop(var, ms, mom, grad, lr, momentum, rho, epsilon, target=utils.CCE): """ Updates var using the RMSProp algorithm. .. math:: \\begin{array}{ll} \\\\ \\hat{ms} &= rho \\cdot ms + (1 - rho) \\cdot grad^2 \\\\ \\hat{mom} &= momentum \\cdot mom + \\frac{lr \\cdot grad}{\\sqrt{\\hat{ms} + epsilon}} \\\\ var &= var - mom \\end{array} Args: var (tvm.tensor.Tensor): The tensor to be updated. Should be float16 or float32. ms (tvm.tensor.Tensor): Mean square, a tensor of same shape and type as var. mom (tvm.tensor.Tensor): A tensor of same shape and type as var. grad (tvm.tensor.Tensor): A tensor of same shape and type as var. lr (tvm.tensor.Tensor): Learning rate, a scalar tensor of same type as var. momentum (tvm.tensor.Tensor): Coefficient for calculate new mom, 0.0 <= momentum <= 1.0. rho (tvm.tensor.Tensor): Coefficient for calculate new ms, 0.0 <= rho <= 1.0. epsilon (float): A small value to prevent division by 0. Returns: tvm.tensor.Tensor, Updated var. tvm.tensor.Tensor, Updated ms. tvm.tensor.Tensor, Updated mom. """ utils.ops_dtype_check(var.dtype, utils.DtypeForDavinci.ALL_FLOAT) _apply_rms_prop_check(var, ms, mom, grad, lr, momentum, rho, epsilon) out_var, out_ms, out_mom = _apply_rms_prop_compute(var, ms, mom, grad, lr, momentum, rho, epsilon) out_var, binds_info = TensorUtils.inplace_set(var, out_var, "var_buf") out_ms, binds_info2 = TensorUtils.inplace_set(ms, out_ms, "ms_buf") out_mom, binds_info3 = TensorUtils.inplace_set(mom, out_mom, "mom_buf") binds_info.update(binds_info2) binds_info.update(binds_info3) attrs = {utils.BINDS: binds_info} return out_var, out_ms, out_mom, attrs
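The update equations in the docstring translate directly to NumPy, which is handy as a test oracle for the compute function; this is a reference sketch, not the device implementation.

import numpy as np

def apply_rms_prop_expect(var, ms, mom, grad, lr, momentum, rho, epsilon):
    """NumPy reference following the docstring equations."""
    ms_new = rho * ms + (1.0 - rho) * grad * grad
    mom_new = momentum * mom + lr * grad / np.sqrt(ms_new + epsilon)
    var_new = var - mom_new
    return var_new, ms_new, mom_new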
Python
def apply_rms_prop_mixed_precision(var, ms, mom, grad, lr, momentum, rho, epsilon): """ Mixed precision version for apply_rms_prop. Args: var (tvm.tensor.Tensor): The tensor to be updated. Should be float32. ms (tvm.tensor.Tensor): Mean square, a tensor of same shape and type as var. mom (tvm.tensor.Tensor): A tensor of same shape and type as var. grad (tvm.tensor.Tensor): A tensor of same shape and type as var. lr (tvm.tensor.Tensor): Learning rate, a scalar tensor of same type as var. momentum (float): Coefficient for calculate new mom, 0.0 <= momentum <= 1.0. rho (float): Coefficient for calculate new ms, 0.0 <= rho <= 1.0. epsilon (float): A small value to prevent division by 0. Returns: tvm.tensor.Tensor, Updated var of type float32. tvm.tensor.Tensor, Updated var of type float16. tvm.tensor.Tensor, Updated ms. tvm.tensor.Tensor, Updated mom. """ utils.ops_dtype_check(var.dtype, utils.DtypeForDavinci.FLOAT32) _apply_rms_prop_check(var, ms, mom, grad, lr, momentum, rho, epsilon) out_var, out_var_fp16, out_ms, out_mom = _apply_rms_prop_mixed_precision_compute( var, ms, mom, grad, lr, momentum, rho, epsilon) out_var, binds_info = TensorUtils.inplace_set(var, out_var, "var_buf") out_ms, binds_info2 = TensorUtils.inplace_set(ms, out_ms, "ms_buf") out_mom, binds_info3 = TensorUtils.inplace_set(mom, out_mom, "mom_buf") binds_info.update(binds_info2) binds_info.update(binds_info3) attrs = {utils.BINDS: binds_info} return out_var, out_var_fp16, out_ms, out_mom, attrs
Python
def ExpAd(head, in_data, target=utils.CCE):
    """
    Compute gradient of exp operator using automatic differentiation.

    Args:
        head (tvm.tensor.Tensor): Tensor of type float16, float32.
        in_data (tvm.tensor.Tensor): Tensor of type float16, float32.

    Returns:
        tvm.tensor.Tensor has the same shape as input.

    Supported Platforms:
        'Ascend'
    """
    # check the validity of head's shape and dtype
    utils.check_shape(head.shape)
    utils.ops_dtype_check(head.dtype, utils.DtypeForDavinci.ALL_FLOAT)
    exp_in_data = Exp(in_data, target)
    jacs = list(akg.differentiate(exp_in_data, [in_data], head))
    return jacs[0]
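Since d/dx exp(x) = exp(x), the autodiff output should match head * exp(in_data) element-wise; the NumPy lines below are only a sketch of how one might build the expected value in a test, with illustrative shapes.

import numpy as np

x = np.random.uniform(-1.0, 1.0, size=(8, 8)).astype(np.float32)
head = np.random.uniform(-1.0, 1.0, size=(8, 8)).astype(np.float32)
expect = head * np.exp(x)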
Python
def Conv(data, fmap_shape, filter_shape, pad, stride, dilation, use_bias=False, attrs=None, params=None, target=utils.CCE): """ Computes sums of 5-D convolutionis. Args: data (list[tvm.tensor.Tensor]): the size is 3 if use_bias else the size is 2; data[0] Tensor of type float16 ,shape 5D (fN, fC // C0, C0, fH, fW) data[1] Tensor of type float16 ,shape 4D (wC // C0 * wH * wW, wN // C0, C0, C0) data[2] Tensor of type float16 ,shape 5D (1, wN // C0, 1, 1, 16) fmap_shape (list[int]): [fN, fC, fH, fW] filter_shape (list[int]): [wN, wC, wH, wW] pad (list[int]): [pad_top, pad_bottom, pad_left, pad_right] stride (list[int]): [stride_h, stride_w] dilation (list[int]): [dilation_h, dilation_w] use_bias (bool): bool var. attrs (dict): dict with keys for example: conv_tile,bypass Returns: tvm.tensor.Tensor of same type as data, shape is 5D(oN, oC // C0, oH, oW, C0) Supported Platforms: 'Ascend' """ c_value = conv_core(data, fmap_shape, filter_shape, pad, stride, dilation, use_bias, attrs) c_value = Cast(c_value, "float16", utils.CCE) if use_bias: bias_value = data[2] output_bias_name = "output1" cube = akg.tvm.compute(c_value.shape, lambda n, c1, h, w, c0: c_value[n, c1, h, w, c0] + bias_value[0, c1, 0, 0, c0], name=output_bias_name) else: cube = c_value block_size = 16 dim_info, _, _, dynamic_ci_c1 = conv_set_dim_func(fmap_shape, filter_shape, pad, stride, dilation, use_bias, block_size, attrs, conv_set_dim_map) all_dynamic = 0 # kh kw pad stride partial_dynamic = 0 # fn fc1 fh fw wN wC dynamic_tiling_full_dynamic = 1 # kh, kw, pad, stride are parameters if dynamic_tiling is enabled if attrs is None: attrs = {} if attrs.get("dynamic"): all_dynamic = 1 if attrs.get("partial_dynamic"): partial_dynamic = 1 dynamic = partial_dynamic or all_dynamic dynamic_tiling = 1 if attrs.get("dynamic") else 0 if not dynamic: attrs = {"dim": dim_info, "pragma_rmselfdep": 0} else: attrs = {"dim": dim_info, "pragma_rmselfdep": 0, "enable_fix_loop_extent": 0, "enable_post_poly_loop_partition": 0, "enable_isolate_loop": 0, "enable_isolate_min_max": 1, "enable_conv_analyze_align": 0, "enable_double_buffer": 1, "enable_multicore": 1, "enable_invariant_hoist": 1, "pragma_keep_outer_band_order": 1, "enable_algebra_simplify": 1, "dynamic_shape_conv_full_parametric": dynamic_tiling and dynamic_tiling_full_dynamic, } attrs["pragma_outerband_need_split"] = 1 attrs["pragma_is_conv"] = 1 if dynamic_tiling: attrs["dynamic_shape"] = set_poly_upper_bound_for_tensor(data[0], 129, 1) # pos 1 of data[0] is CI1 axis else: attrs["dynamic_shape"] = set_poly_upper_bound_for_tensor( data[0], dynamic_ci_c1 + 1, 1) # pos 1 of data[0] is CI1 axis if dynamic_tiling: attrs["pragma_tilesize_is_var"] = 1 attrs["enable_stride_kernel_op"] = 0 return cube, attrs
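For reference, the 5-D output shape promised in the docstring follows standard convolution arithmetic; the helper below is a sketch (not part of the op) that derives (oN, oC // C0, oH, oW, C0) from the same parameter lists, assuming C0 = 16.

def conv_out_shape(fmap_shape, filter_shape, pad, stride, dilation, block_size=16):
    """Sketch: expected NC1HWC0 output shape for Conv."""
    in_n, _, in_h, in_w = fmap_shape
    w_n, _, k_h, k_w = filter_shape
    pad_top, pad_bottom, pad_left, pad_right = pad
    stride_h, stride_w = stride
    dilation_h, dilation_w = dilation
    k_h_d = (k_h - 1) * dilation_h + 1      # dilated kernel extent
    k_w_d = (k_w - 1) * dilation_w + 1
    out_h = (in_h + pad_top + pad_bottom - k_h_d) // stride_h + 1
    out_w = (in_w + pad_left + pad_right - k_w_d) // stride_w + 1
    return (in_n, w_n // block_size, out_h, out_w, block_size)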
Python
def detection_four2five(data, slice_idx, target=utils.CCE): """ Change data from specific four dims to five dims format. Shape changes: [N, box_num * H * W, 4, 1] -> [N, ceil((box_num * 4) / 16), H, W, 16]. Note: With slice + detection_four2five, it can make data with shape [16, 8732, 4, 1] to six data with shape [16, 16//16, 38, 38, 16], [16, 24//16+1, 19, 19, 16], [16, 24//16+1, 10, 10, 16], [16, 24//16+1, 5, 5, 16], [16, 16//16, 3, 3, 16]. Args: data (tvm.tensor.Tensor): Tensor of type float16 with four dims format which the length of last dim is 1. slice_idx (int): Index of slice number. Returns: A tensor with five dims shape. """ utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.FLOAT16) bs = data.shape[0] shape_list = [(bs, 16), (bs, 144), (bs, 600), (bs, 2400), (bs, 8664), (bs, 23104)] res = None if slice_idx == 0: res = akg.tvm.compute(shape_list[0], lambda i, j: data[i][j], name="shape1") elif slice_idx == 1: res = akg.tvm.compute(shape_list[1], lambda i, j: data[i][j + 16], name="shape2") elif slice_idx == 2: res = akg.tvm.compute(shape_list[2], lambda i, j: data[i][j + 160], name="shape3") elif slice_idx == 3: res = akg.tvm.compute(shape_list[3], lambda i, j: data[i][j + 760], name="shape4") elif slice_idx == 4: res = akg.tvm.compute(shape_list[4], lambda i, j: data[i][j + 3160], name="shape5") elif slice_idx == 5: res = akg.tvm.compute(shape_list[5], lambda i, j: data[i][j + 11824], name="shape6") else: raise ValueError("slice index {} not support!".format(slice_idx)) return res
Python
def asinh_grad_run(shape, dtype, attrs): """run function for dsl function asinh_grad.""" shapes = [shape, shape] dtypes = [dtype, dtype] mod = utils.op_build_test(AsinhGrad, shapes, dtypes, kernel_name="asinh_grad", attrs=attrs) bench_mark, inputs, output = gen_data(dtype, shape) output = utils.mod_launch(mod, inputs + [output], expect=bench_mark) rtol, atol = get_rtol_atol("asinh_grad", dtype) compare_res = compare_tensor(output, bench_mark, rtol=rtol, atol=atol) return inputs, output, bench_mark, compare_res
Python
def gen_data(dtype, shape): """Generate data for testing the op""" y = random_gaussian(size=shape).astype(dtype) dy = random_gaussian(size=shape).astype(dtype) expect = _asinh_grad_compute(y, dy) output = np.full(expect.shape, np.nan, dtype) return expect, [y, dy], output
Python
def mean_square(inputs, axis=None, keepdims=False, target="cce"):
    """
    Mean of the squared values of a tensor, alongside the specified axis.

    Arguments:
        inputs: A tensor.
        axis: A list of integers. Axes over which to compute the mean.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1 for each entry in `axis`. If `keepdims` is `True`,
            the reduced dimensions are retained with length 1.

    Returns:
        A tensor with the mean of the element-wise squared values of `inputs`.

    Notice:
        The operator has a known precision issue that remains to be solved.
    """
    inputs_square = square(inputs)
    return Mean(inputs_square, axis, keepdims, target=target)
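A NumPy expectation for tests is just the mean of the element-wise squares over the same axes; the shape and axes below are illustrative only.

import numpy as np

x = np.random.rand(4, 8, 16).astype(np.float32)
expect = np.mean(np.square(x), axis=(1, 2), keepdims=False)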
Python
def _parse_graph(self, graph_json): """Parse and extract the NNVM graph and update the nodes, shapes and dltype. Parameters ---------- graph_json : str or graph class The graph to be deployed in json format output by nnvm graph. """ json_obj = json.loads(graph_json) self._nodes_list = json_obj['nodes'] self._shapes_list = json_obj['attrs']['shape'] self._dtype_list = json_obj['attrs']['dltype'] self._update_graph_json()
Python
def _update_graph_json(self): """update the nodes_list with name, shape and data type, for temporarily storing the output. """ nodes_len = len(self._nodes_list) for i in range(nodes_len): node = self._nodes_list[i] input_list = [] for input_node in node['inputs']: input_list.append(self._nodes_list[input_node[0]]['name']) node['inputs'] = input_list dtype = str("type: " + self._dtype_list[1][i]) if 'attrs' not in node: node['attrs'] = {} node['op'] = "param" else: node['op'] = node['attrs']['func_name'] node['attrs'].update({"T": dtype}) node['shape'] = self._shapes_list[1][i]
Python
def _cleanup_tensors(self):
    """Remove the tensor dump files (the graph won't be removed)."""
    for filename in os.listdir(self._dump_path):
        # os.listdir returns bare names, so join with the dump path before checking/removing
        full_path = os.path.join(self._dump_path, filename)
        if os.path.isfile(full_path) and not filename.endswith(".json"):
            os.remove(full_path)
Python
def dump_output_tensor(self): """Dump the outputs to a temporary folder, the tensors are in numpy format """ #cleanup existing tensors before dumping self._cleanup_tensors() eid = 0 order = 0 output_tensors = {} for node, time in zip(self._nodes_list, self._time_list): num_outputs = self.get_graph_node_output_num(node) for j in range(num_outputs): order += time[0] key = node['name'] + "_" + str(j) + "__" + str(order) output_tensors[key] = self._output_tensor_list[eid] eid += 1 with open(os.path.join(self._dump_path, "output_tensors.params"), "wb") as param_f: param_f.write(save_tensors(output_tensors))
Python
def dump_chrome_trace(self): """Dump the trace to the Chrome trace.json format. """ def s_to_us(t): return t * 10 ** 6 starting_times = np.zeros(len(self._time_list) + 1) starting_times[1:] = np.cumsum([times[0] for times in self._time_list]) def node_to_events(node, times, starting_time): return [ ChromeTraceEvent( ts=s_to_us(starting_time), tid=1, pid=1, ph='B', name=node['name'], ), ChromeTraceEvent( # Use start + duration instead of end to ensure precise timings. ts=s_to_us(times[0] + starting_time), tid=1, pid=1, ph='E', name=node['name'], ), ] events = [ e for (node, times, starting_time) in zip( self._nodes_list, self._time_list, starting_times) for e in node_to_events(node, times, starting_time)] result = dict( displayTimeUnit='ns', traceEvents=[e._asdict() for e in events] ) with open(os.path.join(self._dump_path, CHROME_TRACE_FILE_NAME), "w") as trace_f: json.dump(result, trace_f)
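The dumped file can be opened directly in chrome://tracing; the small loader below is only a sketch (reusing the module-level CHROME_TRACE_FILE_NAME constant) of how one might read the 'B'/'E' event pairs back for inspection.

import json
import os

def load_trace(dump_path):
    """Sketch: read back the dumped Chrome trace and list its begin/end events."""
    with open(os.path.join(dump_path, CHROME_TRACE_FILE_NAME)) as f:
        trace = json.load(f)
    return [(e["name"], e["ph"], e["ts"]) for e in trace["traceEvents"]]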
Python
def dump_graph_json(self, graph):
    """Dump json formatted graph.

    Parameters
    ----------
    graph : json format
        json formatted NNVM graph containing a list of each node's
        name, shape and type.
    """
    graph_dump_file_name = GRAPH_DUMP_FILE_NAME
    with open(os.path.join(self._dump_path, graph_dump_file_name), 'w') as outfile:
        json.dump(graph, outfile, indent=4, sort_keys=False)
Python
def save_tensors(params): """Save parameter dictionary to binary bytes. The result binary bytes can be loaded by the GraphModule with API "load_params". Parameters ---------- params : dict of str to NDArray The parameter dictionary. Returns ------- param_bytes: bytearray Serialized parameters. """ _save_tensors = tvm.get_global_func("_save_param_dict") args = [] for k, v in params.items(): args.append(k) args.append(tvm.nd.array(v)) return _save_tensors(*args)
Python
def Reshape(data, out_shape, target=utils.CUDA): """ Rearranges input tensor data to new shape out_shape. Args: data (tvm.tensor.Tensor): The tensor to be reshaped. out_shape (list, tuple): The new shape applied on the input tensor data, should be compatible with the original shape of data. Returns: The reshaped akg.tvm.tensor of same type as input tensor data. Supported Platforms: 'Ascend', 'GPU' """ if target == utils.CCE: return _reshape_ascend(data, out_shape) data_shape = data.shape utils.check_shape(data_shape) in_shape = get_shape(data) out_shape = list(out_shape) if -1 in out_shape: access_size = 1 for i, o_shape in enumerate(out_shape): if -1 != o_shape: access_size *= o_shape else: hit_idx = i ori_size = reduce(lambda x, y: x * y, in_shape) if ori_size % access_size != 0: raise ValueError(("Invalid out_shape ({})".format(out_shape))) out_shape[hit_idx] = int(ori_size / access_size) res = akg.topi.reshape(data, out_shape) return res
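The -1 handling mirrors NumPy's reshape: at most one dimension may be left as -1 and is inferred from the total element count, so np.reshape works as a host-side reference. A tiny illustrative check:

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
assert np.reshape(x, (4, -1)).shape == (4, 6)   # -1 is resolved to 24 // 4 = 6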
Python
def _reshape_ascend(data, out_shape): """ Rearranges input tensor data to new shape out_shape. Args: data (tvm.tensor.Tensor): The tensor to be reshaped. out_shape (list, tuple): The new shape applied on the input tensor data, should be compatible with the original shape of data. Returns: The reshaped akg.tvm.tensor of same type as input tensor data. Supported Platforms: 'Ascend' """ utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.INT32.value + utils.DtypeForDavinci.ALL_FLOAT.value) data_shape = data.shape utils.check_shape(data_shape) in_shape = get_shape(data) out_shape = list(out_shape) is_dynamic = ds.shape_is_dynamic(data) if -1 in out_shape: access_size = 1 for i, o_shape in enumerate(out_shape): if -1 != o_shape: access_size *= o_shape else: hit_idx = i ori_size = reduce(lambda x, y: x * y, in_shape) if ori_size % access_size != 0: raise ValueError(("Invalid out_shape ({})".format(out_shape))) out_shape[hit_idx] = int(ori_size / access_size) else: if not is_dynamic: if reduce(lambda x, y: x * y, in_shape) != reduce(lambda x, y: x * y, out_shape): raise ValueError("the total length of out_shape is not equal to the in_shape") inputs = akg.tvm.compute(in_shape, lambda *indice: data(*indice), name="inputs") res = akg.topi.reshape(inputs, out_shape) output = akg.tvm.compute(out_shape, lambda *indice: res(*indice), name="reshape") return output
Python
def AccumulateNv2(data, target=utils.CCE): """ Compute sum of all elements in tensor. Args: data (Union[tuple, list]): the list of input tensors of type float16, float32, int8, uint8, int32. Returns: tvm.tensor.Tensor, compute result, get all elements' sum. Supported Platforms: 'Ascend' """ for d in data: utils.ops_dtype_check(d.dtype, utils.DtypeForDavinci.ALL_TYPES) for i in range(1, len(data)): utils.elemwise_dtype_check(data[0].dtype, data[i].dtype) utils.elemwise_shape_check(data[0].shape, data[i].shape) res = _accumulate_nv2_compute(data) return res
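Assuming the op follows the usual AccumulateNV2 semantics (an element-wise sum over the list of same-shape inputs, which is what the shape checks above suggest), a NumPy reference is a simple reduce over np.add; this is a hedged sketch, not a statement of the kernel's exact numerics.

import numpy as np
from functools import reduce

tensors = [np.random.rand(4, 4).astype(np.float32) for _ in range(3)]
expect = reduce(np.add, tensors)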
Python
def apply_ftrl_v2_run(shape, dtype, attrs=None): """run function for dsl function apply_ftrl_v2.""" scalar_shape = (1,) var_shape, accum_shape, linear_shape, grad_shape = [shape] * 4 lr_shape, l1_shape, l2_shape, l2_shrinkage_shape, lr_power_shape = [scalar_shape] * 5 shapes = [var_shape, accum_shape, linear_shape, grad_shape, lr_shape, l1_shape, l2_shape, l2_shrinkage_shape, lr_power_shape] dtypes = [dtype] * 9 mod = utils.op_build_test(apply_ftrl_v2, shapes, dtypes, kernel_name='apply_ftrl_v2', attrs=attrs) expects, (var, accum, linear, grad), (lr, l1, l2, l2_shrinkage, lr_power) = ftrl_gen_data(dtype, shape, with_l2_shrinkage=True) outputs = utils.mod_launch(mod, (var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power), outputs=(0, 1, 2)) rtol, atol = get_rtol_atol("apply_ftrl_v2", dtype) compare_result = list(map(lambda x, y: compare_tensor(x, y, rtol=rtol, atol=atol), outputs, expects)) inputs = (var, accum, linear, grad, lr, l1, l2, l2_shrinkage) return inputs, outputs, expects, all(compare_result)
Python
def logsoftmax_ad(shape, dtype, axis, kernel_name, attrs):
    """Compute the gradient of logsoftmax by autodiff."""
    check_list = ["float16"]
    if not dtype.lower() in check_list:
        raise RuntimeError("logsoftmax test only supports %s while dtype is %s" % (",".join(check_list), dtype))
    # check_shape(shape)
    if axis < 0:
        axis = len(shape) + axis
    if axis >= len(shape):
        raise RuntimeError("axis should be less than dimension")
    if axis != len(shape) - 1:
        raise RuntimeError("Only support the last axis currently")

    shape_new = [shape[-2], shape[-1]]
    if len(shape) > 2:
        for i in range(len(shape) - 2):
            shape_new[0] = shape_new[0] * shape[i]
    shape = shape_new

    a_up = akg.tvm.placeholder(shape, dtype=dtype, name="input")
    b_up = logsoftmax.logsoftmax_op(a_up, shape, axis)

    head = akg.tvm.placeholder(b_up.shape, name="head", dtype=dtype)
    _jacs = list(akg.differentiate(b_up, [a_up], head))
    sjac = akg.tvm.create_schedule([_jacs[0].op])
    sjac[_jacs[0].op.input_tensors[1]].compute_inline()
    op_vars = [head, a_up, _jacs[0]]

    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod = akg.build(sjac, op_vars, "cce", name="test2", attrs=attrs, polyhedral=True)
        return mod
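As a sanity check on what the autodiff should produce for the last-axis case, the vector-Jacobian product of logsoftmax is head - softmax(x) * sum(head). The snippet below is a numpy sketch of those expected numerics under that assumption, not the generated CCE kernel.

# Numpy reference for the last-axis logsoftmax gradient (vector-Jacobian product):
# dx = head - softmax(x) * sum(head, axis=-1, keepdims=True)
import numpy as np

def logsoftmax_grad_expect(x, head):
    x_max = x.max(axis=-1, keepdims=True)
    e = np.exp(x - x_max)
    softmax = e / e.sum(axis=-1, keepdims=True)
    return head - softmax * head.sum(axis=-1, keepdims=True)

x = np.random.rand(4, 8).astype(np.float16)
head = np.random.rand(4, 8).astype(np.float16)
print(logsoftmax_grad_expect(x, head).shape)  # (4, 8)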
Python
def AssignSub(data1, data2, target=utils.CCE):
    """
    Computes data1 - data2 elementwise.

    Args:
        data1 (tvm.tensor.Tensor): Tensor of type float16, float32, int32, int8, uint8.
        data2 (tvm.tensor.Tensor): Tensor of same shape and type as data1.

    Returns:
        Subtracted result, with same shape and type as input tensors.

    Supported Platforms:
        'Ascend'
    """
    dtype = data1.dtype
    utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_TYPES)
    utils.elemwise_dtype_check(data1.dtype, data2.dtype)
    utils.elemwise_shape_check(data1.shape, data2.shape)

    need_cast_dtype = ["int8", "uint8"]
    cast_type = "float16"
    if dtype in need_cast_dtype:
        data1 = akg.topi.cast(data1, cast_type)
        data2 = akg.topi.cast(data2, cast_type)

    res = akg.topi.subtract(data1, data2)

    if dtype in need_cast_dtype:
        if dtype == "uint8":
            cons = akg.tvm.const(256, dtype=cast_type)
            res = akg.tvm.compute(res.shape,
                                  lambda *indice: akg.tvm.expr.Select(res(*indice) < 0,
                                                                      res(*indice) + cons,
                                                                      res(*indice)),
                                  name="positive_res")
        res = akg.topi.cast(res, dtype)

    return res
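The uint8 branch above subtracts in float16 and then folds negative results back into [0, 255] before casting; a small numpy sketch of those expected numerics (assign_sub_uint8_expect is an illustrative name, not part of the op):

# Numpy sketch of the uint8 path: subtract in float16, add 256 to negative
# results, then cast back to uint8 (i.e. uint8 wrap-around semantics).
import numpy as np

def assign_sub_uint8_expect(a, b):
    res = a.astype(np.float16) - b.astype(np.float16)
    res = np.where(res < 0, res + 256, res)
    return res.astype(np.uint8)

a = np.array([1, 5, 200], dtype=np.uint8)
b = np.array([3, 2, 250], dtype=np.uint8)
print(assign_sub_uint8_expect(a, b))  # [254   3 206]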
Python
def globalavgpool(n, c, h, w, pool_type, attrs, kernel_name="global_pool"):
    """
    Performs global average pooling on the input.

    For each feature map the result is:
    \f[
        res = \frac{1}{W * H} \sum X_{i,j}
    \f]

    Note:
        The real input is created by akg.tvm.placeholder.

    Args:
        n (int): input batch size.
        c (int): input channel.
        h (int): input height.
        w (int): input width.
        pool_type (str): pooling mode, default average.
        attrs (str): Default None.
        kernel_name (str): the kernel name.

    Returns:
        tvm.tensor.Tensor of shape n * c * 1 * 1
    """
    input = akg.tvm.placeholder((n, c, h, w), name='input', dtype="float16")
    output = akg.topi.nn.global_pool(input, pool_type=pool_type)
    s = akg.tvm.create_schedule(output.op)
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [input, output], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
        return mod
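The formula above is just the mean over the H and W axes of an NCHW tensor; a numpy sketch of the expected numerics only (not the built CCE module):

# Numpy reference for global average pooling on NCHW input.
import numpy as np

def global_avg_pool_expect(x):
    return x.mean(axis=(2, 3), keepdims=True)

x = np.random.rand(2, 4, 7, 7).astype(np.float16)
print(global_avg_pool_expect(x).shape)  # (2, 4, 1, 1)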
Python
def rsqrt(x):
    """
    Computes the reciprocal of the square root of x element-wise.

    Parameters
    ----------
    x : Tensor

    Returns
    -------
    res : Tensor
        The reciprocal of the square root of x.
    """
    return numpy.ones_like(x) / numpy.sqrt(x)
Python
def auto_cast_of_reduce(func, *args, **kwargs):
    """
    Auto-cast decorator.

    Note:
        Before calling the elewise api, check whether the input tensor dtype is supported by the
        intrinsic. If not, cast the input tensor to a supported dtype (on condition that the cast
        type is supported; if the cast type is not supported, a RuntimeError is raised).
    """
    intr = func.__name__

    save_op_output_dtype(func, *args)

    supported_types = reduce_supported_types[intr]

    if len(args) == 3:
        raw_tensor = args[0]
        axis = args[1]
        keepdims = args[2]

        dtype = raw_tensor.dtype

        temp_tensor = raw_tensor
        if dtype not in supported_types:
            temp_tensor = cast(raw_tensor, "float16")

        return func(temp_tensor, axis, keepdims)
    return func(*args, **kwargs)
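The same auto-cast idea, stripped of the akg machinery, is just a decorator that casts unsupported dtypes before forwarding the call. Below is a minimal standalone sketch using numpy as a stand-in; SUPPORTED, auto_cast and reduce_sum here are illustrative names, not the library's API.

# Minimal standalone sketch of the auto-cast-before-reduce pattern.
import functools
import numpy as np

SUPPORTED = {"float16"}

def auto_cast(func):
    @functools.wraps(func)
    def wrapper(tensor, axis, keepdims=False):
        if str(tensor.dtype) not in SUPPORTED:
            tensor = tensor.astype(np.float16)  # mirrors cast(raw_tensor, "float16")
        return func(tensor, axis, keepdims)
    return wrapper

@auto_cast
def reduce_sum(tensor, axis, keepdims=False):
    return tensor.sum(axis=axis, keepdims=keepdims)

print(reduce_sum(np.arange(6, dtype=np.int32).reshape(2, 3), axis=1).dtype)  # float16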
Python
def sum(raw_tensor, axis, keepdims=False):
    """
    Calculate the sum of raw_tensor; only float16 is supported.

    Args:
        raw_tensor (tvm.tensor.Tensor): input tensor
        axis (Union[int, list]): reduce axis (range: [-len(raw_tensor.shape), len(raw_tensor.shape) - 1])
        keepdims (bool): if true, retains reduced dimensions with length 1; default value is False

    Returns:
        tvm.tensor.Tensor, res
    """
    return single_reduce_op(raw_tensor, axis, "reduce_sum", keepdims)
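The axis/keepdims semantics followed by this wrapper (and by the reduce_min and reduce_max wrappers below) match the usual numpy convention; a quick numpy illustration of the reference numerics only:

import numpy as np

x = np.arange(12, dtype=np.float16).reshape(3, 4)
print(np.sum(x, axis=-1).shape)                 # (3,)   keepdims=False drops the reduced axis
print(np.sum(x, axis=-1, keepdims=True).shape)  # (3, 1) keepdims=True keeps a length-1 axis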
Python
def reduce_min(raw_tensor, axis, keepdims=False):
    """
    Calculate the reduce_min of raw_tensor; only float16 is supported.

    Args:
        raw_tensor (tvm.tensor.Tensor): input tensor
        axis (Union[int, list]): reduce axis (range: [-len(raw_tensor.shape), len(raw_tensor.shape) - 1])
        keepdims (bool): if true, retains reduced dimensions with length 1; default value is False

    Returns:
        tvm.tensor.Tensor, res
    """
    return single_reduce_op(raw_tensor, axis, "reduce_min", keepdims)
Python
def reduce_max(raw_tensor, axis, keepdims=False):
    """
    Calculate the reduce_max of raw_tensor; only float16 is supported.

    Args:
        raw_tensor (tvm.tensor.Tensor): input tensor
        axis (Union[int, list]): reduce axis (range: [-len(raw_tensor.shape), len(raw_tensor.shape) - 1])
        keepdims (bool): if true, retains reduced dimensions with length 1; default value is False

    Returns:
        tvm.tensor.Tensor, res
    """
    return single_reduce_op(raw_tensor, axis, "reduce_max", keepdims)
Python
def single_reduce_op(input_tensor, axis, op, keepdims=False):
    """Factory method for single reduce operations."""
    def reduce_compute(data_shape, axis, tensor, func):
        def compute_func(*indice):
            count_indice = 0
            count_reduce = 0
            res_indice = []
            for index in range(len(data_shape)):
                if index not in axis:
                    res_indice.append(indice[count_indice])
                    count_indice += 1
                else:
                    res_indice.append(reduce_axises[count_reduce])
                    count_reduce += 1
                    if keepdims:
                        count_indice += 1

            return func(tensor(*res_indice), axis=reduce_axises)

        reduce_axises = []
        for index, axis_num in enumerate(axis):
            reduce_axises.append(akg.tvm.reduce_axis((0, data_shape[axis_num]), name='k' + str(index + 1)))

        res_reshape = []
        for index, shape_l in enumerate(data_shape):
            if index not in axis:
                res_reshape.append(shape_l)
            else:
                if keepdims:
                    res_reshape.append(1)

        if is_last_axis and not keepdims:
            res_reshape.append(1)

        name = "reduce_" + str(name_index[0])
        name_index[0] += 1

        reduce_res = akg.tvm.compute(res_reshape, compute_func, name=name)
        return reduce_res

    if op.lower() == "reduce_min":
        reduce_func = akg.tvm.min
    elif op.lower() == "reduce_max":
        reduce_func = akg.tvm.max
    elif op.lower() == "reduce_sum":
        reduce_func = akg.tvm.sum
    else:
        raise RuntimeError("Not supported yet for op %s." % op)

    op_tensor = input_tensor
    shape = shape_to_list(op_tensor.shape)
    res_axis = refine_axis(axis, shape)
    if not res_axis:
        return input_tensor

    for i in res_axis:
        is_last_axis = (i == len(shape) - 1)
        if is_last_axis:
            break

    with akg.tvm.tag_scope(op.lower()):
        res = reduce_compute(shape, res_axis, op_tensor, reduce_func)

    return res
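The trickiest part above is the index bookkeeping in compute_func: non-reduced axes consume one coordinate of the output index, while reduced axes take a reduction variable (and, with keepdims, additionally skip the output's length-1 placeholder). A pure-Python sketch of that mapping, with the reduction variables represented as strings; map_output_to_input_indices is a hypothetical helper name.

def map_output_to_input_indices(data_shape, axis, out_indice, keepdims):
    res_indice = []
    count_indice = 0
    for index in range(len(data_shape)):
        if index not in axis:
            res_indice.append(out_indice[count_indice])
            count_indice += 1
        else:
            res_indice.append("k%d" % axis.index(index))
            if keepdims:
                count_indice += 1  # skip the length-1 placeholder in the output index
    return res_indice

# Reducing axis 1 of a rank-3 tensor: output index (i, j) reads input (i, k0, j).
print(map_output_to_input_indices([2, 5, 3], [1], ("i", "j"), keepdims=False))   # ['i', 'k0', 'j']
print(map_output_to_input_indices([2, 5, 3], [1], ("i", 0, "j"), keepdims=True)) # ['i', 'k0', 'j']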
Python
def quantize_chk_cfg_and_gen_outdtype(
        quant_algo, scale_mode, scale_sqrt, qdrtensors):
    """Check that all quantize params are valid, and generate the output dtype."""
    # check quantize algorithm and quantize scale type
    if quant_algo is None:
        # quantize switch off
        if scale_mode is not None or scale_sqrt is not None \
                or qdrtensors is not None:
            raise RuntimeError("Invalid Quantize Config.")
        out_dtype = "float16"
        return out_dtype

    # quantize switch on, all quantize params should not be None
    if scale_mode is None or scale_sqrt is None \
            or qdrtensors is None:
        raise RuntimeError("Invalid Quantize Config!")

    if len(quant_algo) != 2 or any([i not in [0, 1] for i in quant_algo]):
        raise RuntimeError("Invalid Quantize Config!!!")

    # check quantize algorithm
    if quant_algo[0] not in (0, 1):
        raise RuntimeError("Quantize algorithm just support 0 for non "
                           "offset and 1 for half offset, but get {}."
                           "".format(quant_algo[0]))

    # check quantize scale type
    if quant_algo[1] != 0:
        raise RuntimeError("Quantize scale only support SCALAR now.")

    # non offset gets int8, half offset gets uint8
    out_dtype = "int8" if quant_algo[0] == 0 else "uint8"

    if scale_mode not in (0, 1, 2):
        raise ValueError("Invalid scale mode, just support '0,1,2' but get "
                         "{}!".format(scale_mode))
    # current scale mode limit
    if scale_mode in (0, 1):
        raise RuntimeError("quantized_avg_pool just support requantize now!")

    # check scale method
    if scale_sqrt not in (0, 1):
        raise RuntimeError("Invalid scale method!")

    # scalar scale type for now
    if len(qdrtensors) != 2:
        raise RuntimeError("qdrtensors should contain two tensors for scale "
                           "and offset!")

    if get_shape(qdrtensors[0]) != [1] or get_shape(qdrtensors[1]) != [1]:
        raise RuntimeError("Scale for dequantize or requantize only "
                           "support scalar tensor.")

    utils.ops_dtype_check(qdrtensors[0].dtype, utils.DtypeForDavinci.FLOAT16)
    utils.ops_dtype_check(qdrtensors[1].dtype, utils.DtypeForDavinci.FLOAT16)
    # utils.ops_dtype_check(qdrtensors[0].dtype,
    #                       utils.DtypeForDavinci.ALL_FLOAT)
    # utils.ops_dtype_check(qdrtensors[1].dtype,
    #                       utils.DtypeForDavinci.ALL_FLOAT)

    return out_dtype
Python
def quantized_maxpool_tiling_strategy(data, kernel, stride, pad, quant_algo):
    """Custom tiling for quantized maxpool."""
    batch, c_1, fm_h, fm_w, c_0 = get_shape(data)
    _, [out_h, out_w] = \
        cal_pad_shapes_by_strategy(get_shape(data), kernel, stride, pad)

    strategy = list()
    if c_0 == 16:
        h_cut = out_h
        if fm_h >= 50 and fm_w >= 50:
            h_cut = 3
        dim_ind = 0
        tiling_params = list()
        if batch > 1:
            tiling_params.append([1, ct_util.TileConstraint.FACTOR, dim_ind])
            dim_ind = dim_ind + 1
        if c_1 > 1:
            tiling_params.append([1, ct_util.TileConstraint.FACTOR, dim_ind])
            dim_ind = dim_ind + 1
        tiling_params.append([h_cut, ct_util.TileConstraint.FACTOR, dim_ind])
        tiling_params.append(["H", ct_util.TileConstraint.SET_AXIS_INFO, dim_ind])
        tiling_params.append([out_w, ct_util.TileConstraint.FACTOR, dim_ind + 1])

        if quant_algo is not None:
            tiling_params.append([kernel[0], ct_util.TileConstraint.FACTOR, dim_ind + 2])
            tiling_params.append([kernel[1], ct_util.TileConstraint.FACTOR, dim_ind + 3])
            tiling_params.append([16, ct_util.TileConstraint.FACTOR, dim_ind + 4])
        else:
            tiling_params.append([kernel[0], ct_util.TileConstraint.FACTOR, dim_ind + 3])
            tiling_params.append([kernel[1], ct_util.TileConstraint.FACTOR, dim_ind + 4])
            tiling_params.append([16, ct_util.TileConstraint.FACTOR, dim_ind + 2])

        for para in tiling_params:
            strategy += ct_util.create_constraint_on_axis(
                values=para[0], constraints=para[1], axis=para[2])

        # if batch > 1:
        #     strategy += ct_util.create_constraint_on_axis(
        #         values=1,
        #         constraints=ct_util.TileConstraint.FACTOR,
        #         axis=dim_ind)
        #     dim_ind = dim_ind + 1
        # if c_1 > 1:
        #     strategy += ct_util.create_constraint_on_axis(
        #         values=1,
        #         constraints=ct_util.TileConstraint.FACTOR,
        #         axis=dim_ind)
        #     dim_ind = dim_ind + 1
        # strategy += ct_util.create_constraint_on_axis(
        #     values=h_cut,
        #     constraints=ct_util.TileConstraint.FACTOR,
        #     axis=dim_ind)
        # strategy += ct_util.create_constraint_on_axis(
        #     values="H",
        #     constraints=ct_util.TileConstraint.SET_AXIS_INFO,
        #     axis=dim_ind)
        # strategy += ct_util.create_constraint_on_axis(
        #     values=out_w,
        #     constraints=ct_util.TileConstraint.FACTOR,
        #     axis=dim_ind+1)
        # strategy += ct_util.create_constraint_on_axis(
        #     values=kernel[0],
        #     constraints=ct_util.TileConstraint.FACTOR,
        #     axis=dim_ind+2)
        # strategy += ct_util.create_constraint_on_axis(
        #     values=kernel[1],
        #     constraints=ct_util.TileConstraint.FACTOR,
        #     axis=dim_ind+3)
        # strategy += ct_util.create_constraint_on_axis(
        #     values=16,
        #     constraints=ct_util.TileConstraint.FACTOR,
        #     axis=dim_ind+4)
    return strategy
Python
def ConvBackpropFilter(data, fmap_shape, filter_shape, pad_, stride_, dilation_, attrs=None, target=utils.CCE):
    """
    Computes dw according to "conv forward".

    Args:
        data (list[tvm.tensor.Tensor]): list with length 2.
              data[0] (consider as dy): Tensor of type float16, shape 5D (out_n, out_c//C0, out_h, out_w, C0)
              data[1] (consider as x): Tensor of type float16, shape 5D (fN, fC//C0, fH, fW, C0)
        fmap_shape (list[int]): [fN, fC, fH, fW]
        filter_shape (list[int]): [wN, wC, wH, wW]
        pad_ (list[int]): [pad_left, pad_right, pad_top, pad_bottom]
        stride_ (list[int]): [stride_h, stride_w]
        dilation_ (list[int]): [dilation_h, dilation_w]
        attrs (dict): a dict with keys like conv_tile, bypass.

    Returns:
        tvm.tensor.Tensor.
        configs.

    Supported Platforms:
        'Ascend'
    """
    if len(data) != 2:
        raise IndexError("data contains output tensor and feature map tensor")

    utils.convolution_format_check(fmap_shape, filter_shape, pad_, stride_, dilation_)

    block_size = 16

    in_n, in_c, in_h, in_w = fmap_shape
    cout, _, w_h, w_w = filter_shape

    in_c = (in_c + block_size - 1) // block_size * block_size
    cout = (cout + block_size - 1) // block_size * block_size

    pad_top, pad_bottom, pad_left, pad_right = pad_
    stride_h, stride_w = stride_
    dilation_h, dilation_w = dilation_

    if dilation_h != 1 or dilation_w != 1:
        raise ValueError("The value of elements in dilation must be 1")

    out_n = in_n
    out_c = cout
    out_h = (in_h + pad_top + pad_bottom - w_h) // stride_h + 1
    out_w = (in_w + pad_left + pad_right - w_w) // stride_w + 1

    dy_shape = (out_n, out_c, out_h, out_w)
    dx_shape = (in_n, in_c, in_h, in_w)
    dw_shape = (cout, in_c, w_h, w_w)

    key = gen_key(fmap_shape, filter_shape, pad_, stride_, dilation_)
    res_c, configs = conv_backprop_filter_compute(data, dx_shape, dw_shape, dy_shape, pad_, stride_, dilation_,
                                                  block_size=block_size, attrs=attrs, key=key)

    return res_c, configs
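The output spatial sizes computed above follow the usual convolution arithmetic, out = (in + pad_before + pad_after - kernel) // stride + 1. A quick standalone check with illustrative values:

def conv_out_size(in_size, pad_before, pad_after, k_size, stride):
    return (in_size + pad_before + pad_after - k_size) // stride + 1

# e.g. H = W = 224, 7x7 kernel, stride 2, pad 3 on each side -> 112x112 feature map
print(conv_out_size(224, 3, 3, 7, 2))  # 112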
Python
def bessel_i1e(x, target=utils.CCE):
    """
    The modified Bessel i1e function.

    .. math::
        I1e(x) = e^{-|x|} \cdot \frac{x}{2}
                 \sum_{n=0}^{\infty} \frac{(x^2/4)^n}{n!\,(n+1)!}

    Args:
        x (tvm.tensor.Tensor): Tensor of type float16, float32.

    Returns:
        tvm.tensor.Tensor. The modified Bessel i1e function of x element-wise.
        Has the same type as x.
    """
    # check shape
    utils.check_shape(x)

    # check input tensor data_type
    utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)

    res = _bessel_i1e_compute(x)
    return res
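A numpy sketch of the truncated series from the docstring, useful as a reference for the expected numerics; this is only an approximation with a fixed number of terms, not the CCE kernel, and bessel_i1e_expect is an illustrative name.

# I1e(x) = exp(-|x|) * (x/2) * sum_n (x^2/4)^n / (n! * (n+1)!)
import math
import numpy as np

def bessel_i1e_expect(x, terms=20):
    x = np.asarray(x, dtype=np.float64)
    t = (x * x) / 4.0
    series = sum((t ** n) / (math.factorial(n) * math.factorial(n + 1)) for n in range(terms))
    return np.exp(-np.abs(x)) * (x / 2.0) * series

print(bessel_i1e_expect([0.5, 1.0, 2.0]))  # approximately [0.1564, 0.2079, 0.2153]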
Python
def _pack_buffer(buf):
    """Build an intrinsic that packs the buffer."""
    assert buf.shape
    shape = _make.Call("handle", "tvm_stack_make_shape", buf.shape,
                       _Call.Intrinsic, None, 0)
    strides = _make.Call("handle", "tvm_stack_make_shape", buf.strides,
                         _Call.Intrinsic, None, 0) if buf.strides else 0
    pack_args = [buf.data,
                 shape,
                 strides,
                 len(buf.shape),
                 const(0, dtype=buf.dtype),
                 buf.elem_offset]
    return _make.Call("handle", "tvm_stack_make_array",
                      pack_args, _Call.Intrinsic, None, 0)
Python
def call_packed(*args):
    """Build an expression by calling an external packed function.

    The argument to the packed function can be Expr or Buffer.
    The argument is the corresponding POD type when an Expr is presented.

    When the argument is a Buffer, the corresponding PackedFunc
    will receive a TVMArrayHandle whose content is valid during the callback period.
    If the PackedFunc is a python callback, then the corresponding argument is an NDArray.

    Parameters
    ----------
    args : list of Expr or Buffer.
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.

    See Also
    --------
    tvm.extern : Create tensor with extern function call.
    """
    call_args = [_pack_buffer(x) if isinstance(x, _Buffer) else x for x in args]
    return _make.Call(
        "int32", "tvm_call_packed", call_args, _Call.Intrinsic, None, 0)
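A hedged usage sketch, following the classic TVM extern-op pattern referenced in "See Also" above: call_packed is used inside tvm.extern to invoke a user-registered packed function on buffers. The name "my_addone" and the numpy body are illustrative assumptions; in this repo the same entry points are typically reached through the akg.tvm namespace, and lowering (not execution) is shown so no particular backend is required.

import numpy as np
import tvm

@tvm.register_func("my_addone")
def _my_addone(src, dst):
    # At runtime the callback receives NDArray handles, per the docstring above.
    tvm.nd.array(src.asnumpy() + 1).copyto(dst)

n = 128
A = tvm.placeholder((n,), name="A", dtype="float32")
B = tvm.extern(A.shape, [A],
               lambda ins, outs: tvm.call_packed("my_addone", ins[0], outs[0]),
               name="B", dtype="float32")
s = tvm.create_schedule(B.op)
# The lowered IR contains a tvm_call_packed("my_addone", ...) call.
print(tvm.lower(s, [A, B], simple_mode=True))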
Python
def call_pure_intrin(dtype, func_name, *args):
    """Build expression by calling a pure intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The intrinsic function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    args = convert(args)
    return _make.Call(
        dtype, func_name, convert(args), _Call.PureIntrinsic, None, 0)
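A hedged sketch of typical use: math intrinsics in TVM are thin wrappers that form a PureIntrinsic call expression through this function. The snippet below builds an exp-like expression directly; it assumes the standard TVM 0.x namespace (in this repo the same entry points are reachable via akg.tvm) and is illustrative rather than a new API.

import tvm

x = tvm.var("x", dtype="float32")
e = tvm.call_pure_intrin("float32", "exp", x)
print(e)  # a PureIntrinsic call expression, printed roughly as exp(x)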