Dataset columns:

    code        string  (lengths 75 to 104k)
    docstring   string  (lengths 1 to 46.9k)
    text        string  (lengths 164 to 112k)
def _add_filter_node(root, filter_, value):
    """Adds filter xml node to root."""
    filter_el = ElementTree.SubElement(root, 'Filter')
    filter_el.set('name', filter_.name)

    # Set filter value depending on type.
    if filter_.type == 'boolean':
        # Boolean case. Check the bool literals first so that .lower()
        # is only ever called on strings (value=False previously raised
        # AttributeError before reaching the elif branch).
        if value is True or (isinstance(value, str) and
                             value.lower() in {'included', 'only'}):
            filter_el.set('excluded', '0')
        elif value is False or (isinstance(value, str) and
                                value.lower() == 'excluded'):
            filter_el.set('excluded', '1')
        else:
            raise ValueError('Invalid value for boolean filter ({})'
                             .format(value))
    elif isinstance(value, (list, tuple)):
        # List case.
        filter_el.set('value', ','.join(map(str, value)))
    else:
        # Default case.
        filter_el.set('value', str(value))
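A hypothetical usage sketch; the `Filter` namedtuple below is a stand-in for whatever filter class the surrounding module actually defines:

from collections import namedtuple
from xml.etree import ElementTree

Filter = namedtuple('Filter', ['name', 'type'])

root = ElementTree.Element('Filters')
_add_filter_node(root, Filter('active', 'boolean'), 'included')
_add_filter_node(root, Filter('ids', 'list'), [1, 2, 3])
print(ElementTree.tostring(root).decode())
# roughly: <Filters><Filter name="active" excluded="0" /><Filter name="ids" value="1,2,3" /></Filters>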
def fill_fw_dict_from_db(self, fw_data):
    """
    This routine is called to create a local fw_dict with data from DB.
    """
    rule_dict = fw_data.get('rules').get('rules')
    fw_dict = {'fw_id': fw_data.get('fw_id'),
               'fw_name': fw_data.get('name'),
               'firewall_policy_id': fw_data.get('firewall_policy_id'),
               'fw_type': fw_data.get('fw_type'),
               'router_id': fw_data.get('router_id'),
               'rules': {}}
    for rule in rule_dict:
        fw_dict['rules'][rule] = rule_dict.get(rule)
    return fw_dict
def SetConsoleTitle(text: str) -> bool:
    """
    SetConsoleTitle from Win32.
    text: str.
    Return bool, True if succeeded, otherwise False.
    """
    return bool(ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(text)))
def predict(self, X, break_ties="random", return_probs=False, **kwargs):
    """Predicts int labels for an input X on all tasks

    Args:
        X: The input for the predict_proba method
        break_ties: A tie-breaking policy
        return_probs: Return the predicted probabilities as well

    Returns:
        Y_p: A t-length list of n-dim np.ndarrays of predictions in [1, K_t]
        [Optionally: Y_s: A t-length list of [n, K_t] np.ndarrays of
            predicted probabilities]
    """
    Y_s = self.predict_proba(X, **kwargs)
    self._check(Y_s, typ=list)
    self._check(Y_s[0], typ=np.ndarray)

    Y_p = []
    for Y_ts in Y_s:
        Y_tp = self._break_ties(Y_ts, break_ties)
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        Y_p.append(Y_tp.astype(int))

    if return_probs:
        return Y_p, Y_s
    else:
        return Y_p
def update_service(self, stack, service, args):
    """Update a service.

    Updates the configuration of the named service (container image and
    other parameters); changes take effect once the containers are
    redeployed. If the manualUpdate parameter is specified, the deploy
    service API must additionally be called with its own parameters to
    deploy; a service in manual-upgrade mode rejects all other
    modification operations. If manualUpdate is not specified, the
    platform completes the deployment automatically.

    Args:
        - stack: name of the stack (service group) the service belongs to
        - service: service name
        - args: request parameters (json) describing the service, see
          http://kirk-docs.qiniu.com/apidocs/

    Returns:
        A tuple (<result>, <ResponseInfo>):
        - result: an empty dict {} on success, or
          {"error": "<errMsg string>"} on failure
        - ResponseInfo: response information for the request
    """
    url = '{0}/v3/stacks/{1}/services/{2}'.format(self.host, stack, service)
    return self.__post(url, args)
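A hedged usage sketch, assuming `client` is an instance of the class defining this method; the stack, service, and image names are illustrative:

# Update the container image of service "web" in stack "demo".
ret, info = client.update_service('demo', 'web', {'image': 'nginx:1.25'})
if 'error' in ret:
    print('update failed:', ret['error'], info)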
def create_bool(help_string=NO_HELP, default=NO_DEFAULT):
    # type: (str, Union[bool, NO_DEFAULT_TYPE]) -> bool
    """
    Create a bool parameter
    :param help_string: help text describing the parameter
    :param default: default value for the parameter
    :return: the parameter descriptor
    """
    # noinspection PyTypeChecker
    return ParamFunctions(
        help_string=help_string,
        default=default,
        type_name="bool",
        function_s2t=convert_string_to_bool,
        function_t2s=convert_bool_to_string,
    )
def get(self, path, *args, **kwargs):
    '''GET the provided endpoint'''
    target = self._host.relative(path).utf8
    if not isinstance(target, basestring):
        # On older versions of the `url` library, .utf8 is a method,
        # not a property.
        target = target()
    params = kwargs.get('params', {})
    params.update(self._params)
    kwargs['params'] = params
    logger.debug('GET %s with %s, %s', target, args, kwargs)
    return requests.get(target, *args, **kwargs)
def tile(self, zoom, row, col):
    """
    Return ``BufferedTile`` object of this ``BufferedTilePyramid``.

    Parameters
    ----------
    zoom : integer
        zoom level
    row : integer
        tile matrix row
    col : integer
        tile matrix column

    Returns
    -------
    buffered tile : ``BufferedTile``
    """
    tile = self.tile_pyramid.tile(zoom, row, col)
    return BufferedTile(tile, pixelbuffer=self.pixelbuffer)
def register_presence_callback(self, type_, from_, cb):
    """
    Register a callback to be called when a presence stanza is received.

    :param type_: Presence type to listen for.
    :type type_: :class:`~.PresenceType`
    :param from_: Sender JID to listen for, or :data:`None` for a
        wildcard match.
    :type from_: :class:`~aioxmpp.JID` or :data:`None`.
    :param cb: Callback function
    :raises ValueError: if another listener with the same
        ``(type_, from_)`` pair is already registered
    :raises ValueError: if `type_` is not a valid :class:`~.PresenceType`
        (and cannot be cast to a :class:`~.PresenceType`)

    `cb` will be called whenever a presence stanza matching the `type_`
    is received from the specified sender. `from_` may be :data:`None`
    to indicate a wildcard.

    Like with :meth:`register_message_callback`, more specific callbacks
    win over less specific callbacks. The fallback order is identical,
    except that the ``type_=None`` entries described there do not apply
    for presence stanzas and are thus omitted.

    See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact
    wildcarding rules.

    .. versionchanged:: 0.7

       The `type_` argument is now supposed to be a
       :class:`~.PresenceType` member.

    .. deprecated:: 0.7

       Passing a :class:`str` as `type_` argument is deprecated and will
       raise a :class:`TypeError` as of the 1.0 release. See the
       Changelog for :ref:`api-changelog-0.7` for further details on how
       to upgrade your code efficiently.

    .. deprecated:: 0.9

       This method has been deprecated. It is recommended to use
       :class:`aioxmpp.PresenceClient` instead.
    """
    type_ = self._coerce_enum(type_, structs.PresenceType)
    warnings.warn(
        "register_presence_callback is deprecated; use "
        "aioxmpp.dispatcher.SimplePresenceDispatcher or "
        "aioxmpp.PresenceClient instead",
        DeprecationWarning,
        stacklevel=2
    )
    self._xxx_presence_dispatcher.register_callback(
        type_,
        from_,
        cb,
    )
def getargspec(method):
    """
    Drill through layers of decorators attempting to locate the actual
    argspec for a method.
    """
    argspec = _getargspec(method)
    args = argspec[0]
    if args and args[0] == 'self':
        return argspec
    if hasattr(method, '__func__'):
        method = method.__func__

    func_closure = six.get_function_closure(method)

    # NOTE(sileht): if the closure is None we cannot look deeper,
    # so return actual argspec, this occurs when the method
    # is static for example.
    if not func_closure:
        return argspec

    closure = None
    # In the case of deeply nested decorators (with arguments), it's
    # possible that there are several callables in scope; Take a best
    # guess and go with the one that looks most like a pecan controller
    # function (has a __code__ object, and 'self' is the first argument)
    func_closure = filter(
        lambda c: (
            six.callable(c.cell_contents) and
            hasattr(c.cell_contents, '__code__')
        ),
        func_closure
    )
    func_closure = sorted(
        func_closure,
        key=lambda c: 'self' in c.cell_contents.__code__.co_varnames,
        reverse=True
    )
    closure = func_closure[0]

    method = closure.cell_contents
    return getargspec(method)
def handle(cls, vm, args):
    """Set up a forwarding connection to the given VM and pipe docker
    commands over SSH."""
    docker = Iaas.info(vm)
    if not docker:
        raise Exception('docker vm %s not found' % vm)
    if docker['state'] != 'running':
        Iaas.start(vm)  # XXX
    remote_addr = docker['ifaces'][0]['ips'][0]['ip']
    port = unixpipe.setup(remote_addr, 'root', '/var/run/docker.sock')
    os.environ['DOCKER_HOST'] = 'tcp://localhost:%d' % port
    cls.echo('using DOCKER_HOST=%s' % os.environ['DOCKER_HOST'])
    subprocess.call(['docker'] + list(args))
def cv(row, col_name, arg, current_data_model, df, con):
    """
    row[col_name] must contain only values from the appropriate
    controlled vocabulary
    """
    vocabulary = con.vocab.vocabularies
    cell_value = str(row[col_name])
    if not cell_value:
        return None
    elif cell_value == "None":
        return None
    cell_values = cell_value.split(":")
    cell_values = [c.strip() for c in cell_values]
    # Get possible values for controlled vocabulary;
    # exclude weird unicode.
    possible_values = []
    for val in vocabulary[col_name]:
        try:
            possible_values.append(str(val).lower())
        except UnicodeEncodeError as ex:
            print(val, ex)
    for value in cell_values:
        if str(value).lower() == "nan":
            continue
        elif str(value).lower() in possible_values:
            continue
        elif value.lower() == "none":
            continue
        else:
            try:
                if str(float(value)) in possible_values:
                    continue
            except (ValueError, TypeError):
                # Not a number; fall through to the error below.
                pass
            return '"{}" is not in controlled vocabulary for {}'.format(value, arg)
    return None
def has(self, module_name, class_name, attribute_name):
    """
    Parameters
    ----------
    module_name: String
        The name of the module
    class_name: String
        The name of the class
    attribute_name: String
        The name of the attribute

    Returns
    -------
    has_prior: bool
        True iff a prior exists for the module, class and attribute
    """
    self.read(module_name)
    return self.parser.has_option(class_name, attribute_name)
def remove_from_organization(self, delete_account=False):
    """
    Remove a user from the organization's list of visible users.
    Optionally also delete the account. Deleting the account can only be
    done if the organization owns the account's domain.

    :param delete_account: Whether to delete the account after removing
        from the organization (default false)
    :return: None, because you cannot follow this command with another.
    """
    self.append(removeFromOrg={"deleteAccount": True if delete_account else False})
    return None
def decrby(self, key, decrement):
    """Decrement the integer value of a key by the given number.

    :raises TypeError: if decrement is not int
    """
    if not isinstance(decrement, int):
        raise TypeError("decrement must be of type int")
    return self.execute(b'DECRBY', key, decrement)
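A quick usage sketch of the type guard, assuming `client` is an instance exposing this method:

client.decrby('visits', 3)    # sends DECRBY visits 3
client.decrby('visits', '3')  # raises TypeError: decrement must be of type int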
def _estimate_runner_memory(json_file):
    """Estimate Java memory requirements based on number of samples.

    A rough approach to selecting correct allocated memory for Cromwell.
    """
    with open(json_file) as in_handle:
        sinfo = json.load(in_handle)
    num_parallel = 1
    for key in ["config__algorithm__variantcaller", "description"]:
        item_counts = []
        n = 0
        for val in (sinfo.get(key) or []):
            n += 1
            if val:
                if isinstance(val, (list, tuple)):
                    item_counts.append(len(val))
                else:
                    item_counts.append(1)
        print(key, n, item_counts)
        if n and item_counts:
            num_parallel = n * max(item_counts)
            break
    if num_parallel < 25:
        return "3g"
    if num_parallel < 150:
        return "6g"
    elif num_parallel < 500:
        return "12g"
    else:
        return "24g"
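A small worked example of the heuristic, with illustrative input:

# 100 samples, each configured with two variant callers:
sinfo = {"config__algorithm__variantcaller": [["gatk", "vardict"]] * 100}
# n = 100 entries, max per-entry item count = 2, so
# num_parallel = 100 * 2 = 200, which falls in [150, 500) -> "12g".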
def _elu(attrs, inputs, proto_obj):
    """Elu function"""
    if 'alpha' in attrs:
        new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha': 'slope'})
    else:
        new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0})
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'})
    return 'LeakyReLU', new_attrs, inputs
def set_pd_mag_from_counts(photodict, c='', ec='', lec='', uec='',
                           zp=DEFAULT_ZP, sig=DEFAULT_UL_SIGMA):
    """Set photometry dictionary from a counts measurement."""
    with localcontext() as ctx:
        if lec == '' or uec == '':
            lec = ec
            uec = ec
        prec = max(
            get_sig_digits(str(c), strip_zeroes=False),
            get_sig_digits(str(lec), strip_zeroes=False),
            get_sig_digits(str(uec), strip_zeroes=False)) + 1
        ctx.prec = prec
        dlec = Decimal(str(lec))
        duec = Decimal(str(uec))
        if c != '':
            dc = Decimal(str(c))
        dzp = Decimal(str(zp))
        dsig = Decimal(str(sig))
        photodict[PHOTOMETRY.ZERO_POINT] = str(zp)
        if c == '' or float(c) < float(sig) * float(uec):
            photodict[PHOTOMETRY.UPPER_LIMIT] = True
            photodict[PHOTOMETRY.UPPER_LIMIT_SIGMA] = str(sig)
            photodict[PHOTOMETRY.MAGNITUDE] = str(dzp - (D25 * (
                dsig * duec).log10()))
            dnec = Decimal('10.0') ** (
                (dzp - Decimal(photodict[PHOTOMETRY.MAGNITUDE])) / D25)
            photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
                (dnec + duec).log10() - dnec.log10()))
        else:
            photodict[PHOTOMETRY.MAGNITUDE] = str(dzp - D25 * dc.log10())
            photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
                (dc + duec).log10() - dc.log10()))
            photodict[PHOTOMETRY.E_LOWER_MAGNITUDE] = str(D25 * (
                dc.log10() - (dc - dlec).log10()))
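For reference, a compact summary of what the two branches compute; `D25` is assumed to be `Decimal('2.5')`, which is not shown in this excerpt:

# detection:    m       = zp - 2.5 * log10(c)
# upper limit:  m_UL    = zp - 2.5 * log10(sig * uec)
# upper error:  e_upper = 2.5 * (log10(c + uec) - log10(c))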
def scale(arr, mn=0, mx=1):
    """
    Apply min-max scaling (normalize) then scale to (mn, mx)
    """
    amn = arr.min()
    amx = arr.max()
    # normalize:
    arr = (arr - amn) / (amx - amn)
    # scale:
    if amn != mn or amx != mx:
        arr *= mx - mn
        arr += mn
    return arr
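A quick usage sketch, assuming a NumPy float array (the normalization step produces a new array, so the caller's input is not modified in place):

import numpy as np

arr = np.array([2.0, 4.0, 6.0])
print(scale(arr, mn=-1, mx=1))  # [-1.  0.  1.]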
def _compute(self, arrays, dates, assets, mask):
    """
    For each row in the input, compute a like-shaped array of per-row
    ranks.
    """
    return masked_rankdata_2d(
        arrays[0],
        mask,
        self.inputs[0].missing_value,
        self._method,
        self._ascending,
    )
def get_translation_functions(package_name: str, names: Tuple[str, ...] = ('gettext',)):
    """Finds the translation functions for a package and returns them."""
    translation = get_translation_for(package_name)
    return [getattr(translation, x) for x in names]
def gilliland_cdpp(times, mags, errs,
                   windowlength=97,
                   polyorder=2,
                   binsize=23400,  # in seconds: 6.5 hours for classic CDPP
                   sigclip=5.0,
                   magsarefluxes=False,
                   **kwargs):
    '''This calculates the CDPP of a timeseries using the method in the paper:

    Gilliland, R. L., Chaplin, W. J., Dunham, E. W., et al. 2011, ApJS, 197, 6
    (http://adsabs.harvard.edu/abs/2011ApJS..197....6G)

    The steps are:

    - pass the time-series through a Savitzky-Golay filter.

      - we use `scipy.signal.savgol_filter`, `**kwargs` are passed to this.
      - also see: http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay.
      - the `windowlength` is the number of LC points to use (Kepler uses 2
        days = (1440 minutes/day / 30 minutes/LC point) x 2 days = 96 -> 97
        LC points).
      - the `polyorder` is a quadratic by default.

    - subtract the smoothed time-series from the actual light curve.
    - sigma clip the remaining LC.
    - get the binned mag series by averaging over 6.5 hour bins, only
      retaining bins with at least 7 points.
    - the standard deviation of the binned averages is the CDPP.
    - multiply this by 1.168 to correct for over-subtraction of white-noise.

    Parameters
    ----------
    times,mags,errs : np.array
        The input mag/flux time-series to calculate CDPP for.

    windowlength : int
        The smoothing window size to use.

    polyorder : int
        The polynomial order to use in the Savitzky-Golay smoothing.

    binsize : int
        The bin size to use for binning the light curve.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed
        using the number provided as the sigma-multiplier to cut out from
        the input time-series.

        If a list of two ints/floats is provided, the function will perform
        an 'asymmetric' sigma-clip. The first element in this list is the
        sigma value to use for fainter flux/mag values; the second element
        in this list is the sigma value to use for brighter flux/mag
        values. For example, `sigclip=[10., 3.]`, will sigclip out greater
        than 10-sigma dimmings and greater than 3-sigma brightenings. Here
        the meaning of "dimming" and "brightening" is set by *physics* (not
        the magnitude system), which is why the `magsarefluxes` kwarg must
        be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through
        to the output.

    magsarefluxes : bool
        If True, indicates the input time-series is fluxes and not mags.

    kwargs : additional kwargs
        These are passed directly to `scipy.signal.savgol_filter`.

    Returns
    -------
    float
        The calculated CDPP value.

    '''

    # if no errs are given, assume 0.1% errors
    if errs is None:
        errs = 0.001 * mags

    # get rid of nans first
    find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
    ftimes = times[find]
    fmags = mags[find]
    ferrs = errs[find]

    if ftimes.size < (3 * windowlength):
        LOGERROR('not enough LC points to calculate CDPP')
        return npnan

    # now get the smoothed mag series using the filter
    # kwargs are provided to the savgol_filter function
    smoothed = savgol_filter(fmags, windowlength, polyorder, **kwargs)
    subtracted = fmags - smoothed

    # sigclip the subtracted light curve
    stimes, smags, serrs = sigclip_magseries(ftimes, subtracted, ferrs,
                                             magsarefluxes=magsarefluxes)

    # bin over 6.5 hour bins and throw away all bins with less than 7 elements
    binned = time_bin_magseries_with_errs(stimes, smags, serrs,
                                          binsize=binsize,
                                          minbinelems=7)
    bmags = binned['binnedmags']

    # stdev of bin mags x 1.168 -> CDPP
    cdpp = npstd(bmags) * 1.168
    return cdpp
def download_file_part_run(download_context):
    """
    Download one part of a file. Runs in a background process.

    :param download_context: contains data service setup and the
        parameters (destination, file url data, seek offset, byte count)
        for the part to download.
    """
    destination_dir, file_url_data_dict, seek_amt, bytes_to_read = download_context.params
    project_file = ProjectFile(file_url_data_dict)
    local_path = project_file.get_local_path(destination_dir)
    retry_chunk_downloader = RetryChunkDownloader(project_file, local_path,
                                                  seek_amt, bytes_to_read,
                                                  download_context)
    retry_chunk_downloader.run()
    return 'ok'
def cql_encode_all_types(self, val, as_text_type=False):
    """
    Converts any type into a CQL string, defaulting to ``cql_encode_object``
    if :attr:`~Encoder.mapping` does not contain an entry for the type.
    """
    encoded = self.mapping.get(type(val), self.cql_encode_object)(val)
    if as_text_type and not isinstance(encoded, six.text_type):
        return encoded.decode('utf-8')
    return encoded
def set_property_filter(filter_proto, name, op, value):
    """Set property filter constraint in the given datastore.Filter proto message.

    Args:
      filter_proto: datastore.Filter proto message
      name: property name
      op: datastore.PropertyFilter.Operation
      value: property value

    Returns:
      the same datastore.Filter.

    Usage:
      >>> set_property_filter(filter_proto, 'foo',
      ...     datastore.PropertyFilter.EQUAL, 'a')  # WHERE 'foo' = 'a'
    """
    filter_proto.Clear()
    pf = filter_proto.property_filter
    pf.property.name = name
    pf.op = op
    set_value(pf.value, value)
    return filter_proto
def convert_money(self, money: Money, to: str, reverse: bool = False) -> Money:
    """Convert money to another currency"""
    converted = self.convert(money.amount, money.currency, to, reverse)
    return Money(converted, to)
def is_identity(self):
    """Check to see if this matrix is an identity matrix."""
    for index, row in enumerate(self.dta):
        if row[index] == 1:
            # The diagonal entry is 1; every other entry in the row
            # must be 0.
            for num, element in enumerate(row):
                if num != index and element != 0:
                    return False
        else:
            return False
    return True
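A minimal sketch of the intended behavior, assuming a matrix class that stores its rows as lists in `dta` (the `Matrix` constructor here is hypothetical):

m = Matrix([[1, 0], [0, 1]])
print(m.is_identity())  # True

m = Matrix([[1, 2], [0, 1]])
print(m.is_identity())  # False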
def _gen_unzip(it, elem_len):
    """Helper for unzip which checks the lengths of each element in it.

    Parameters
    ----------
    it : iterable[tuple]
        An iterable of tuples. ``unzip`` should ensure that these are
        already tuples.
    elem_len : int or None
        The expected element length. If this is None it is inferred from
        the length of the first element.

    Yields
    ------
    elem : tuple
        Each element of ``it``.

    Raises
    ------
    ValueError
        Raised when the lengths do not match the ``elem_len``.
    """
    elem = next(it)
    first_elem_len = len(elem)

    if elem_len is not None and elem_len != first_elem_len:
        raise ValueError(
            'element at index 0 was length %d, expected %d' % (
                first_elem_len,
                elem_len,
            )
        )
    else:
        elem_len = first_elem_len

    yield elem
    for n, elem in enumerate(it, 1):
        if len(elem) != elem_len:
            raise ValueError(
                'element at index %d was length %d, expected %d' % (
                    n,
                    len(elem),
                    elem_len,
                ),
            )
        yield elem
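A quick usage sketch showing both the pass-through and the error path:

rows = iter([(1, 'a'), (2, 'b')])
print(list(_gen_unzip(rows, None)))  # [(1, 'a'), (2, 'b')]

bad = iter([(1, 'a'), (2, 'b', 'c')])
list(_gen_unzip(bad, None))  # ValueError: element at index 1 was length 3, expected 2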
def session_callback(self, signal):
    """Signalling from stream session.

    Data - new data available for processing.
    Playing - Connection is healthy.
    Retry - if there is no connection to device.
    """
    if signal == SIGNAL_DATA:
        self.event.new_event(self.data)
    elif signal == SIGNAL_FAILED:
        self.retry()
    if signal in [SIGNAL_PLAYING, SIGNAL_FAILED] and \
            self.connection_status_callback:
        self.connection_status_callback(signal)
def list_subscriptions(self, topic_name):
    '''
    Retrieves the subscriptions in the specified topic.

    topic_name: Name of the topic.
    '''
    _validate_not_none('topic_name', topic_name)
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = '/' + _str(topic_name) + '/subscriptions/'
    request.path, request.query = self._httpclient._update_request_uri_query(request)  # pylint: disable=protected-access
    request.headers = self._update_service_bus_header(request)
    response = self._perform_request(request)

    return _ETreeXmlToObject.convert_response_to_feeds(
        response, _convert_etree_element_to_subscription)
def center_blit(target, source, dest = (0, 0), area=None, special_flags=0): ''' Blits surface @source to the center of surface @target. Takes the normal Surface.blit() flags; however, @dest is used as an offset. ''' loc = lambda d, s: _vec(d.get_size()) / 2 - _vec(s.get_size()) / 2 _blitter(loc, target, source, dest, area, special_flags)
Blits surface @source to the center of surface @target. Takes the normal Surface.blit() flags; however, @dest is used as an offset.
Below is the the instruction that describes the task: ### Input: Blits surface @source to the center of surface @target. Takes the normal Surface.blit() flags; however, @dest is used as an offset. ### Response: def center_blit(target, source, dest = (0, 0), area=None, special_flags=0): ''' Blits surface @source to the center of surface @target. Takes the normal Surface.blit() flags; however, @dest is used as an offset. ''' loc = lambda d, s: _vec(d.get_size()) / 2 - _vec(s.get_size()) / 2 _blitter(loc, target, source, dest, area, special_flags)
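For context, the same centering arithmetic without the module's private `_vec`/`_blitter` helpers is a two-line computation (a minimal pygame sketch; the surfaces here are hypothetical):

import pygame

target = pygame.Surface((200, 200))
source = pygame.Surface((50, 30))
# top-left blit position = center of target minus half the source size
x = target.get_width() // 2 - source.get_width() // 2
y = target.get_height() // 2 - source.get_height() // 2
target.blit(source, (x, y))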
def _range(self, memory, addr, **kwargs): """ Gets the (min, max) range of solutions for an address. """ return (self._min(memory, addr, **kwargs), self._max(memory, addr, **kwargs))
Gets the (min, max) range of solutions for an address.
Below is the the instruction that describes the task: ### Input: Gets the (min, max) range of solutions for an address. ### Response: def _range(self, memory, addr, **kwargs): """ Gets the (min, max) range of solutions for an address. """ return (self._min(memory, addr, **kwargs), self._max(memory, addr, **kwargs))
def get_name(component): """ Attempt to get the string name of component, including module and class if applicable. """ if six.callable(component): name = getattr(component, "__qualname__", component.__name__) return '.'.join([component.__module__, name]) return str(component)
Attempt to get the string name of component, including module and class if applicable.
Below is the the instruction that describes the task: ### Input: Attempt to get the string name of component, including module and class if applicable. ### Response: def get_name(component): """ Attempt to get the string name of component, including module and class if applicable. """ if six.callable(component): name = getattr(component, "__qualname__", component.__name__) return '.'.join([component.__module__, name]) return str(component)
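A quick check of the record above (assumes `six` is importable and `get_name` is in scope): callables come back module-qualified, everything else goes through `str`.

import json

print(get_name(json.dumps))  # 'json.dumps'
print(get_name(42))          # '42'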
def read(self, vals): """Read values. Args: vals (list): list of strings representing values """ i = 0 if len(vals[i]) == 0: self.year = None else: self.year = vals[i] i += 1 if len(vals[i]) == 0: self.month = None else: self.month = vals[i] i += 1 if len(vals[i]) == 0: self.day = None else: self.day = vals[i] i += 1 if len(vals[i]) == 0: self.hour = None else: self.hour = vals[i] i += 1 if len(vals[i]) == 0: self.minute = None else: self.minute = vals[i] i += 1 if len(vals[i]) == 0: self.data_source_and_uncertainty_flags = None else: self.data_source_and_uncertainty_flags = vals[i] i += 1 if len(vals[i]) == 0: self.dry_bulb_temperature = None else: self.dry_bulb_temperature = vals[i] i += 1 if len(vals[i]) == 0: self.dew_point_temperature = None else: self.dew_point_temperature = vals[i] i += 1 if len(vals[i]) == 0: self.relative_humidity = None else: self.relative_humidity = vals[i] i += 1 if len(vals[i]) == 0: self.atmospheric_station_pressure = None else: self.atmospheric_station_pressure = vals[i] i += 1 if len(vals[i]) == 0: self.extraterrestrial_horizontal_radiation = None else: self.extraterrestrial_horizontal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.extraterrestrial_direct_normal_radiation = None else: self.extraterrestrial_direct_normal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.horizontal_infrared_radiation_intensity = None else: self.horizontal_infrared_radiation_intensity = vals[i] i += 1 if len(vals[i]) == 0: self.global_horizontal_radiation = None else: self.global_horizontal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.direct_normal_radiation = None else: self.direct_normal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.diffuse_horizontal_radiation = None else: self.diffuse_horizontal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.global_horizontal_illuminance = None else: self.global_horizontal_illuminance = vals[i] i += 1 if len(vals[i]) == 0: self.direct_normal_illuminance = None else: self.direct_normal_illuminance = vals[i] i += 1 if len(vals[i]) == 0: self.diffuse_horizontal_illuminance = None else: self.diffuse_horizontal_illuminance = vals[i] i += 1 if len(vals[i]) == 0: self.zenith_luminance = None else: self.zenith_luminance = vals[i] i += 1 if len(vals[i]) == 0: self.wind_direction = None else: self.wind_direction = vals[i] i += 1 if len(vals[i]) == 0: self.wind_speed = None else: self.wind_speed = vals[i] i += 1 if len(vals[i]) == 0: self.total_sky_cover = None else: self.total_sky_cover = vals[i] i += 1 if len(vals[i]) == 0: self.opaque_sky_cover = None else: self.opaque_sky_cover = vals[i] i += 1 if len(vals[i]) == 0: self.visibility = None else: self.visibility = vals[i] i += 1 if len(vals[i]) == 0: self.ceiling_height = None else: self.ceiling_height = vals[i] i += 1 if len(vals[i]) == 0: self.present_weather_observation = None else: self.present_weather_observation = vals[i] i += 1 if len(vals[i]) == 0: self.present_weather_codes = None else: self.present_weather_codes = vals[i] i += 1 if len(vals[i]) == 0: self.precipitable_water = None else: self.precipitable_water = vals[i] i += 1 if len(vals[i]) == 0: self.aerosol_optical_depth = None else: self.aerosol_optical_depth = vals[i] i += 1 if len(vals[i]) == 0: self.snow_depth = None else: self.snow_depth = vals[i] i += 1 if len(vals[i]) == 0: self.days_since_last_snowfall = None else: self.days_since_last_snowfall = vals[i] i += 1 if len(vals[i]) == 0: self.albedo = None else: self.albedo = vals[i] i += 1 if len(vals[i]) == 0: self.liquid_precipitation_depth = None else: self.liquid_precipitation_depth = vals[i] i += 1 if len(vals[i]) == 0: self.liquid_precipitation_quantity = None else: self.liquid_precipitation_quantity = vals[i] i += 1
Read values. Args: vals (list): list of strings representing values
Below is the the instruction that describes the task: ### Input: Read values. Args: vals (list): list of strings representing values ### Response: def read(self, vals): """Read values. Args: vals (list): list of strings representing values """ i = 0 if len(vals[i]) == 0: self.year = None else: self.year = vals[i] i += 1 if len(vals[i]) == 0: self.month = None else: self.month = vals[i] i += 1 if len(vals[i]) == 0: self.day = None else: self.day = vals[i] i += 1 if len(vals[i]) == 0: self.hour = None else: self.hour = vals[i] i += 1 if len(vals[i]) == 0: self.minute = None else: self.minute = vals[i] i += 1 if len(vals[i]) == 0: self.data_source_and_uncertainty_flags = None else: self.data_source_and_uncertainty_flags = vals[i] i += 1 if len(vals[i]) == 0: self.dry_bulb_temperature = None else: self.dry_bulb_temperature = vals[i] i += 1 if len(vals[i]) == 0: self.dew_point_temperature = None else: self.dew_point_temperature = vals[i] i += 1 if len(vals[i]) == 0: self.relative_humidity = None else: self.relative_humidity = vals[i] i += 1 if len(vals[i]) == 0: self.atmospheric_station_pressure = None else: self.atmospheric_station_pressure = vals[i] i += 1 if len(vals[i]) == 0: self.extraterrestrial_horizontal_radiation = None else: self.extraterrestrial_horizontal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.extraterrestrial_direct_normal_radiation = None else: self.extraterrestrial_direct_normal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.horizontal_infrared_radiation_intensity = None else: self.horizontal_infrared_radiation_intensity = vals[i] i += 1 if len(vals[i]) == 0: self.global_horizontal_radiation = None else: self.global_horizontal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.direct_normal_radiation = None else: self.direct_normal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.diffuse_horizontal_radiation = None else: self.diffuse_horizontal_radiation = vals[i] i += 1 if len(vals[i]) == 0: self.global_horizontal_illuminance = None else: self.global_horizontal_illuminance = vals[i] i += 1 if len(vals[i]) == 0: self.direct_normal_illuminance = None else: self.direct_normal_illuminance = vals[i] i += 1 if len(vals[i]) == 0: self.diffuse_horizontal_illuminance = None else: self.diffuse_horizontal_illuminance = vals[i] i += 1 if len(vals[i]) == 0: self.zenith_luminance = None else: self.zenith_luminance = vals[i] i += 1 if len(vals[i]) == 0: self.wind_direction = None else: self.wind_direction = vals[i] i += 1 if len(vals[i]) == 0: self.wind_speed = None else: self.wind_speed = vals[i] i += 1 if len(vals[i]) == 0: self.total_sky_cover = None else: self.total_sky_cover = vals[i] i += 1 if len(vals[i]) == 0: self.opaque_sky_cover = None else: self.opaque_sky_cover = vals[i] i += 1 if len(vals[i]) == 0: self.visibility = None else: self.visibility = vals[i] i += 1 if len(vals[i]) == 0: self.ceiling_height = None else: self.ceiling_height = vals[i] i += 1 if len(vals[i]) == 0: self.present_weather_observation = None else: self.present_weather_observation = vals[i] i += 1 if len(vals[i]) == 0: self.present_weather_codes = None else: self.present_weather_codes = vals[i] i += 1 if len(vals[i]) == 0: self.precipitable_water = None else: self.precipitable_water = vals[i] i += 1 if len(vals[i]) == 0: self.aerosol_optical_depth = None else: self.aerosol_optical_depth = vals[i] i += 1 if len(vals[i]) == 0: self.snow_depth = None else: self.snow_depth = vals[i] i += 1 if len(vals[i]) == 0: self.days_since_last_snowfall = None else: self.days_since_last_snowfall = vals[i] i += 1 if len(vals[i]) == 0: self.albedo = None else: self.albedo = vals[i] i += 1 if len(vals[i]) == 0: self.liquid_precipitation_depth = None else: self.liquid_precipitation_depth = vals[i] i += 1 if len(vals[i]) == 0: self.liquid_precipitation_quantity = None else: self.liquid_precipitation_quantity = vals[i] i += 1
def update(self, friendly_name=values.unset, code_length=values.unset, lookup_enabled=values.unset, skip_sms_to_landlines=values.unset, dtmf_input_required=values.unset, tts_name=values.unset, psd2_enabled=values.unset): """ Update the ServiceInstance :param unicode friendly_name: A string to describe the verification service :param unicode code_length: The length of the verification code to generate :param bool lookup_enabled: Whether to perform a lookup with each verification :param bool skip_sms_to_landlines: Whether to skip sending SMS verifications to landlines :param bool dtmf_input_required: Whether to ask the user to press a number before delivering the verify code in a phone call :param unicode tts_name: The name of an alternative text-to-speech service to use in phone calls :param bool psd2_enabled: Whether to pass PSD2 transaction parameters when starting a verification :returns: Updated ServiceInstance :rtype: twilio.rest.verify.v2.service.ServiceInstance """ return self._proxy.update( friendly_name=friendly_name, code_length=code_length, lookup_enabled=lookup_enabled, skip_sms_to_landlines=skip_sms_to_landlines, dtmf_input_required=dtmf_input_required, tts_name=tts_name, psd2_enabled=psd2_enabled, )
Update the ServiceInstance :param unicode friendly_name: A string to describe the verification service :param unicode code_length: The length of the verification code to generate :param bool lookup_enabled: Whether to perform a lookup with each verification :param bool skip_sms_to_landlines: Whether to skip sending SMS verifications to landlines :param bool dtmf_input_required: Whether to ask the user to press a number before delivering the verify code in a phone call :param unicode tts_name: The name of an alternative text-to-speech service to use in phone calls :param bool psd2_enabled: Whether to pass PSD2 transaction parameters when starting a verification :returns: Updated ServiceInstance :rtype: twilio.rest.verify.v2.service.ServiceInstance
Below is the the instruction that describes the task: ### Input: Update the ServiceInstance :param unicode friendly_name: A string to describe the verification service :param unicode code_length: The length of the verification code to generate :param bool lookup_enabled: Whether to perform a lookup with each verification :param bool skip_sms_to_landlines: Whether to skip sending SMS verifications to landlines :param bool dtmf_input_required: Whether to ask the user to press a number before delivering the verify code in a phone call :param unicode tts_name: The name of an alternative text-to-speech service to use in phone calls :param bool psd2_enabled: Whether to pass PSD2 transaction parameters when starting a verification :returns: Updated ServiceInstance :rtype: twilio.rest.verify.v2.service.ServiceInstance ### Response: def update(self, friendly_name=values.unset, code_length=values.unset, lookup_enabled=values.unset, skip_sms_to_landlines=values.unset, dtmf_input_required=values.unset, tts_name=values.unset, psd2_enabled=values.unset): """ Update the ServiceInstance :param unicode friendly_name: A string to describe the verification service :param unicode code_length: The length of the verification code to generate :param bool lookup_enabled: Whether to perform a lookup with each verification :param bool skip_sms_to_landlines: Whether to skip sending SMS verifications to landlines :param bool dtmf_input_required: Whether to ask the user to press a number before delivering the verify code in a phone call :param unicode tts_name: The name of an alternative text-to-speech service to use in phone calls :param bool psd2_enabled: Whether to pass PSD2 transaction parameters when starting a verification :returns: Updated ServiceInstance :rtype: twilio.rest.verify.v2.service.ServiceInstance """ return self._proxy.update( friendly_name=friendly_name, code_length=code_length, lookup_enabled=lookup_enabled, skip_sms_to_landlines=skip_sms_to_landlines, dtmf_input_required=dtmf_input_required, tts_name=tts_name, psd2_enabled=psd2_enabled, )
def _tracing_information(): """Gets B3 distributed tracing information, if available. This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format. """ # We'll collate trace information if the B3 headers have been collected: values = b3.values() if values[b3.b3_trace_id]: # Trace information would normally be sent to Zipkin if either of sampled or debug ("flags") is set to 1 # However we're not currently using Zipkin, so it's always false # exported = "true" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else "false" return [ current_app.name if current_app.name else " - ", values[b3.b3_trace_id], values[b3.b3_span_id], "false", ]
Gets B3 distributed tracing information, if available. This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format.
Below is the the instruction that describes the task: ### Input: Gets B3 distributed tracing information, if available. This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format. ### Response: def _tracing_information(): """Gets B3 distributed tracing information, if available. This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format. """ # We'll collate trace information if the B3 headers have been collected: values = b3.values() if values[b3.b3_trace_id]: # Trace information would normally be sent to Zipkin if either of sampled or debug ("flags") is set to 1 # However we're not currently using Zipkin, so it's always false # exported = "true" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else "false" return [ current_app.name if current_app.name else " - ", values[b3.b3_trace_id], values[b3.b3_span_id], "false", ]
def load_labware(self, labware: Labware) -> Labware: """ Load labware onto a Magnetic Module, checking if it is compatible """ if labware.magdeck_engage_height is None: MODULE_LOG.warning( "This labware ({}) is not explicitly compatible with the" " Magnetic Module. You will have to specify a height when" " calling engage().") return super().load_labware(labware)
Load labware onto a Magnetic Module, checking if it is compatible
Below is the the instruction that describes the task: ### Input: Load labware onto a Magnetic Module, checking if it is compatible ### Response: def load_labware(self, labware: Labware) -> Labware: """ Load labware onto a Magnetic Module, checking if it is compatible """ if labware.magdeck_engage_height is None: MODULE_LOG.warning( "This labware ({}) is not explicitly compatible with the" " Magnetic Module. You will have to specify a height when" " calling engage().") return super().load_labware(labware)
def register_view(self, view): """Register callbacks for button press events and selection changed""" super(ListViewController, self).register_view(view) self.tree_view.connect('button_press_event', self.mouse_click)
Register callbacks for button press events and selection changed
Below is the the instruction that describes the task: ### Input: Register callbacks for button press events and selection changed ### Response: def register_view(self, view): """Register callbacks for button press events and selection changed""" super(ListViewController, self).register_view(view) self.tree_view.connect('button_press_event', self.mouse_click)
def resize(self, width, height): """ Sets the new size and buffer size internally """ self.width = width self.height = height self.buffer_width, self.buffer_height = glfw.get_framebuffer_size(self.window) self.set_default_viewport()
Sets the new size and buffer size internally
Below is the the instruction that describes the task: ### Input: Sets the new size and buffer size internally ### Response: def resize(self, width, height): """ Sets the new size and buffer size internally """ self.width = width self.height = height self.buffer_width, self.buffer_height = glfw.get_framebuffer_size(self.window) self.set_default_viewport()
def to(self, to): """ [Edge-only] specifies the destination of the edge """ if self._type.lower() != 'edge': raise ValueError('Cannot set From/To to non-edge objects') self._to = to return self
[Edge-only] specifies the destination of the edge
Below is the the instruction that describes the task: ### Input: [Edge-only] specifies the destination of the edge ### Response: def to(self, to): """ [Edge-only] specifies the destination of the edge """ if self._type.lower() != 'edge': raise ValueError('Cannot set From/To to non-edge objects') self._to = to return self
def from_raw_profiles(cls, raw_profiles, profile_name, cli_vars, target_override=None, threads_override=None): """ :param raw_profiles dict: The profile data, from disk as yaml. :param profile_name str: The profile name to use. :param cli_vars dict: The command-line variables passed as arguments, as a dict. :param target_override Optional[str]: The target to use, if provided on the command line. :param threads_override Optional[str]: The thread count to use, if provided on the command line. :raises DbtProjectError: If there is no profile name specified in the project or the command line arguments :raises DbtProfileError: If the profile is invalid or missing, or the target could not be found :returns Profile: The new Profile object. """ if profile_name not in raw_profiles: raise DbtProjectError( "Could not find profile named '{}'".format(profile_name) ) # First, we've already got our final decision on profile name, and we # don't render keys, so we can pluck that out raw_profile = raw_profiles[profile_name] user_cfg = raw_profiles.get('config') return cls.from_raw_profile_info( raw_profile=raw_profile, profile_name=profile_name, cli_vars=cli_vars, user_cfg=user_cfg, target_override=target_override, threads_override=threads_override, )
:param raw_profiles dict: The profile data, from disk as yaml. :param profile_name str: The profile name to use. :param cli_vars dict: The command-line variables passed as arguments, as a dict. :param target_override Optional[str]: The target to use, if provided on the command line. :param threads_override Optional[str]: The thread count to use, if provided on the command line. :raises DbtProjectError: If there is no profile name specified in the project or the command line arguments :raises DbtProfileError: If the profile is invalid or missing, or the target could not be found :returns Profile: The new Profile object.
Below is the the instruction that describes the task: ### Input: :param raw_profiles dict: The profile data, from disk as yaml. :param profile_name str: The profile name to use. :param cli_vars dict: The command-line variables passed as arguments, as a dict. :param target_override Optional[str]: The target to use, if provided on the command line. :param threads_override Optional[str]: The thread count to use, if provided on the command line. :raises DbtProjectError: If there is no profile name specified in the project or the command line arguments :raises DbtProfileError: If the profile is invalid or missing, or the target could not be found :returns Profile: The new Profile object. ### Response: def from_raw_profiles(cls, raw_profiles, profile_name, cli_vars, target_override=None, threads_override=None): """ :param raw_profiles dict: The profile data, from disk as yaml. :param profile_name str: The profile name to use. :param cli_vars dict: The command-line variables passed as arguments, as a dict. :param target_override Optional[str]: The target to use, if provided on the command line. :param threads_override Optional[str]: The thread count to use, if provided on the command line. :raises DbtProjectError: If there is no profile name specified in the project or the command line arguments :raises DbtProfileError: If the profile is invalid or missing, or the target could not be found :returns Profile: The new Profile object. """ if profile_name not in raw_profiles: raise DbtProjectError( "Could not find profile named '{}'".format(profile_name) ) # First, we've already got our final decision on profile name, and we # don't render keys, so we can pluck that out raw_profile = raw_profiles[profile_name] user_cfg = raw_profiles.get('config') return cls.from_raw_profile_info( raw_profile=raw_profile, profile_name=profile_name, cli_vars=cli_vars, user_cfg=user_cfg, target_override=target_override, threads_override=threads_override, )
def cfg_from_file(self, yaml_filename, config_dict): """Load a config file and merge it into the default options.""" import yaml from easydict import EasyDict as edict with open(yaml_filename, 'r') as f: yaml_cfg = edict(yaml.load(f)) return self._merge_a_into_b(yaml_cfg, config_dict)
Load a config file and merge it into the default options.
Below is the the instruction that describes the task: ### Input: Load a config file and merge it into the default options. ### Response: def cfg_from_file(self, yaml_filename, config_dict): """Load a config file and merge it into the default options.""" import yaml from easydict import EasyDict as edict with open(yaml_filename, 'r') as f: yaml_cfg = edict(yaml.load(f)) return self._merge_a_into_b(yaml_cfg, config_dict)
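The `_merge_a_into_b` helper is not shown in the record; a minimal recursive merge along the lines the call implies might look like this (a sketch under that assumption, not the original implementation -- note also that `yaml.safe_load` is the safer modern replacement for bare `yaml.load`):

def merge_a_into_b(a, b):
    """Recursively overwrite entries of dict b with entries of dict a."""
    for key, value in a.items():
        if isinstance(value, dict) and isinstance(b.get(key), dict):
            merge_a_into_b(value, b[key])
        else:
            b[key] = value
    return b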
def _store(self, con): """Store a database connection for subsequent use.""" self._con = con self._transaction = False self._closed = False self._usage = 0
Store a database connection for subsequent use.
Below is the the instruction that describes the task: ### Input: Store a database connection for subsequent use. ### Response: def _store(self, con): """Store a database connection for subsequent use.""" self._con = con self._transaction = False self._closed = False self._usage = 0
def getcal(self): """Retrieve the SDS calibration coefficients. Args:: no argument Returns:: 5-element tuple holding: - cal: calibration factor (attribute 'scale_factor') - cal_error : calibration factor error (attribute 'scale_factor_err') - offset: calibration offset (attribute 'add_offset') - offset_err : offset error (attribute 'add_offset_err') - data_type : type of the data resulting from applying the calibration formula to the dataset values (attribute 'calibrated_nt') An exception is raised if no calibration data are defined. Original dataset values 'orival' are converted to calibrated values 'calval' through the formula:: calval = cal * (orival - offset) The calibration coefficients are part of the so-called "standard" SDS attributes. The values inside the tuple returned by 'getcal' are those of the following attributes, in order:: scale_factor, scale_factor_err, add_offset, add_offset_err, calibrated_nt C library equivalent: SDgetcal() """ status, cal, cal_error, offset, offset_err, data_type = \ _C.SDgetcal(self._id) _checkErr('getcal', status, 'no calibration record') return cal, cal_error, offset, offset_err, data_type
Retrieve the SDS calibration coefficients. Args:: no argument Returns:: 5-element tuple holding: - cal: calibration factor (attribute 'scale_factor') - cal_error : calibration factor error (attribute 'scale_factor_err') - offset: calibration offset (attribute 'add_offset') - offset_err : offset error (attribute 'add_offset_err') - data_type : type of the data resulting from applying the calibration formula to the dataset values (attribute 'calibrated_nt') An exception is raised if no calibration data are defined. Original dataset values 'orival' are converted to calibrated values 'calval' through the formula:: calval = cal * (orival - offset) The calibration coefficients are part of the so-called "standard" SDS attributes. The values inside the tuple returned by 'getcal' are those of the following attributes, in order:: scale_factor, scale_factor_err, add_offset, add_offset_err, calibrated_nt C library equivalent: SDgetcal()
Below is the the instruction that describes the task: ### Input: Retrieve the SDS calibration coefficients. Args:: no argument Returns:: 5-element tuple holding: - cal: calibration factor (attribute 'scale_factor') - cal_error : calibration factor error (attribute 'scale_factor_err') - offset: calibration offset (attribute 'add_offset') - offset_err : offset error (attribute 'add_offset_err') - data_type : type of the data resulting from applying the calibration formula to the dataset values (attribute 'calibrated_nt') An exception is raised if no calibration data are defined. Original dataset values 'orival' are converted to calibrated values 'calval' through the formula:: calval = cal * (orival - offset) The calibration coefficients are part of the so-called "standard" SDS attributes. The values inside the tuple returned by 'getcal' are those of the following attributes, in order:: scale_factor, scale_factor_err, add_offset, add_offset_err, calibrated_nt C library equivalent: SDgetcal() ### Response: def getcal(self): """Retrieve the SDS calibration coefficients. Args:: no argument Returns:: 5-element tuple holding: - cal: calibration factor (attribute 'scale_factor') - cal_error : calibration factor error (attribute 'scale_factor_err') - offset: calibration offset (attribute 'add_offset') - offset_err : offset error (attribute 'add_offset_err') - data_type : type of the data resulting from applying the calibration formula to the dataset values (attribute 'calibrated_nt') An exception is raised if no calibration data are defined. Original dataset values 'orival' are converted to calibrated values 'calval' through the formula:: calval = cal * (orival - offset) The calibration coefficients are part of the so-called "standard" SDS attributes. The values inside the tuple returned by 'getcal' are those of the following attributes, in order:: scale_factor, scale_factor_err, add_offset, add_offset_err, calibrated_nt C library equivalent: SDgetcal() """ status, cal, cal_error, offset, offset_err, data_type = \ _C.SDgetcal(self._id) _checkErr('getcal', status, 'no calibration record') return cal, cal_error, offset, offset_err, data_type
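Applying the calibration formula from the docstring is a one-liner once the coefficients are retrieved (sketch with made-up numbers):

import numpy as np

cal, cal_err, offset, offset_err, data_type = 0.01, 0.0, 100.0, 0.0, 5
orival = np.array([150, 200, 250])
calval = cal * (orival - offset)  # array([0.5, 1. , 1.5])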
def insert_at_frontier(self, operations: ops.OP_TREE, start: int, frontier: Dict[ops.Qid, int] = None ) -> Dict[ops.Qid, int]: """Inserts operations inline at frontier. Args: operations: the operations to insert start: the moment at which to start inserting the operations frontier: frontier[q] is the earliest moment in which an operation acting on qubit q can be placed. """ if frontier is None: frontier = defaultdict(lambda: 0) operations = tuple(ops.flatten_op_tree(operations)) if not operations: return frontier qubits = set(q for op in operations for q in op.qubits) if any(frontier[q] > start for q in qubits): raise ValueError('The frontier for qubits on which the operations' 'to insert act cannot be after start.') next_moments = self.next_moments_operating_on(qubits, start) insertion_indices, _ = self._pick_inserted_ops_moment_indices( operations, start, frontier) self._push_frontier(frontier, next_moments) self._insert_operations(operations, insertion_indices) return frontier
Inserts operations inline at frontier. Args: operations: the operations to insert start: the moment at which to start inserting the operations frontier: frontier[q] is the earliest moment in which an operation acting on qubit q can be placed.
Below is the the instruction that describes the task: ### Input: Inserts operations inline at frontier. Args: operations: the operations to insert start: the moment at which to start inserting the operations frontier: frontier[q] is the earliest moment in which an operation acting on qubit q can be placed. ### Response: def insert_at_frontier(self, operations: ops.OP_TREE, start: int, frontier: Dict[ops.Qid, int] = None ) -> Dict[ops.Qid, int]: """Inserts operations inline at frontier. Args: operations: the operations to insert start: the moment at which to start inserting the operations frontier: frontier[q] is the earliest moment in which an operation acting on qubit q can be placed. """ if frontier is None: frontier = defaultdict(lambda: 0) operations = tuple(ops.flatten_op_tree(operations)) if not operations: return frontier qubits = set(q for op in operations for q in op.qubits) if any(frontier[q] > start for q in qubits): raise ValueError('The frontier for qubits on which the operations' 'to insert act cannot be after start.') next_moments = self.next_moments_operating_on(qubits, start) insertion_indices, _ = self._pick_inserted_ops_moment_indices( operations, start, frontier) self._push_frontier(frontier, next_moments) self._insert_operations(operations, insertion_indices) return frontier
def _normalize_orders(self): """Helper: adjust orders based on cursors, where clauses.""" orders = list(self._orders) _has_snapshot_cursor = False if self._start_at: if isinstance(self._start_at[0], document.DocumentSnapshot): _has_snapshot_cursor = True if self._end_at: if isinstance(self._end_at[0], document.DocumentSnapshot): _has_snapshot_cursor = True if _has_snapshot_cursor: should_order = [ _enum_from_op_string(key) for key in _COMPARISON_OPERATORS if key not in (_EQ_OP, "array_contains") ] order_keys = [order.field.field_path for order in orders] for filter_ in self._field_filters: field = filter_.field.field_path if filter_.op in should_order and field not in order_keys: orders.append(self._make_order(field, "ASCENDING")) if not orders: orders.append(self._make_order("__name__", "ASCENDING")) else: order_keys = [order.field.field_path for order in orders] if "__name__" not in order_keys: direction = orders[-1].direction # enum? orders.append(self._make_order("__name__", direction)) return orders
Helper: adjust orders based on cursors, where clauses.
Below is the the instruction that describes the task: ### Input: Helper: adjust orders based on cursors, where clauses. ### Response: def _normalize_orders(self): """Helper: adjust orders based on cursors, where clauses.""" orders = list(self._orders) _has_snapshot_cursor = False if self._start_at: if isinstance(self._start_at[0], document.DocumentSnapshot): _has_snapshot_cursor = True if self._end_at: if isinstance(self._end_at[0], document.DocumentSnapshot): _has_snapshot_cursor = True if _has_snapshot_cursor: should_order = [ _enum_from_op_string(key) for key in _COMPARISON_OPERATORS if key not in (_EQ_OP, "array_contains") ] order_keys = [order.field.field_path for order in orders] for filter_ in self._field_filters: field = filter_.field.field_path if filter_.op in should_order and field not in order_keys: orders.append(self._make_order(field, "ASCENDING")) if not orders: orders.append(self._make_order("__name__", "ASCENDING")) else: order_keys = [order.field.field_path for order in orders] if "__name__" not in order_keys: direction = orders[-1].direction # enum? orders.append(self._make_order("__name__", direction)) return orders
def update_stats(self, stats, delta, sample_rate=1): """ Updates one or more stats counters by arbitrary amounts >>> statsd_client.update_stats('some.int',10) """ if not isinstance(stats, list): stats = [stats] data = dict((stat, "%s|c" % delta) for stat in stats) self.send(data, sample_rate)
Updates one or more stats counters by arbitrary amounts >>> statsd_client.update_stats('some.int',10)
Below is the the instruction that describes the task: ### Input: Updates one or more stats counters by arbitrary amounts >>> statsd_client.update_stats('some.int',10) ### Response: def update_stats(self, stats, delta, sample_rate=1): """ Updates one or more stats counters by arbitrary amounts >>> statsd_client.update_stats('some.int',10) """ if not isinstance(stats, list): stats = [stats] data = dict((stat, "%s|c" % delta) for stat in stats) self.send(data, sample_rate)
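The statsd counter wire format built above is easy to verify by hand (standalone sketch of the same dict comprehension):

stats = ['some.int', 'other.int']
delta = 10
data = dict((stat, "%s|c" % delta) for stat in stats)
print(data)  # {'some.int': '10|c', 'other.int': '10|c'}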
def display_eventtype(self): """Read the list of event types in the annotations and update widgets. """ if self.annot is not None: event_types = sorted(self.annot.event_types, key=str.lower) else: event_types = [] self.idx_eventtype.clear() evttype_group = QGroupBox('Event Types') layout = QVBoxLayout() evttype_group.setLayout(layout) self.check_all_eventtype = check_all = QCheckBox('All event types') check_all.setCheckState(Qt.Checked) check_all.clicked.connect(self.toggle_eventtype) layout.addWidget(check_all) self.idx_eventtype_list = [] for one_eventtype in event_types: self.idx_eventtype.addItem(one_eventtype) item = QCheckBox(one_eventtype) layout.addWidget(item) item.setCheckState(Qt.Checked) item.stateChanged.connect(self.update_annotations) item.stateChanged.connect(self.toggle_check_all_eventtype) self.idx_eventtype_list.append(item) self.idx_eventtype_scroll.setWidget(evttype_group)
Read the list of event types in the annotations and update widgets.
Below is the the instruction that describes the task: ### Input: Read the list of event types in the annotations and update widgets. ### Response: def display_eventtype(self): """Read the list of event types in the annotations and update widgets. """ if self.annot is not None: event_types = sorted(self.annot.event_types, key=str.lower) else: event_types = [] self.idx_eventtype.clear() evttype_group = QGroupBox('Event Types') layout = QVBoxLayout() evttype_group.setLayout(layout) self.check_all_eventtype = check_all = QCheckBox('All event types') check_all.setCheckState(Qt.Checked) check_all.clicked.connect(self.toggle_eventtype) layout.addWidget(check_all) self.idx_eventtype_list = [] for one_eventtype in event_types: self.idx_eventtype.addItem(one_eventtype) item = QCheckBox(one_eventtype) layout.addWidget(item) item.setCheckState(Qt.Checked) item.stateChanged.connect(self.update_annotations) item.stateChanged.connect(self.toggle_check_all_eventtype) self.idx_eventtype_list.append(item) self.idx_eventtype_scroll.setWidget(evttype_group)
def plot_linear_relation(x, y, x_err=None, y_err=None, title=None, point_label=None, legend=None, plot_range=None, plot_range_y=None, x_label=None, y_label=None, y_2_label=None, log_x=False, log_y=False, size=None, filename=None): ''' Takes point data (x,y) with errors(x,y) and fits a straight line. The deviation to this line is also plotted, showing the offset. Parameters ---------- x, y, x_err, y_err: iterable filename: string, PdfPages object or None PdfPages file object: plot is appended to the pdf string: new plot file with the given filename is created None: the plot is printed to screen ''' fig = Figure() FigureCanvas(fig) ax = fig.add_subplot(111) if x_err is not None: x_err = [x_err, x_err] if y_err is not None: y_err = [y_err, y_err] ax.set_title(title) if y_label is not None: ax.set_ylabel(y_label) if log_x: ax.set_xscale('log') if log_y: ax.set_yscale('log') if plot_range: ax.set_xlim((min(plot_range), max(plot_range))) if plot_range_y: ax.set_ylim((min(plot_range_y), max(plot_range_y))) if legend: fig.legend(legend, 0) ax.grid(True) ax.errorbar(x, y, xerr=x_err, yerr=y_err, fmt='o', color='black') # plot points # label points if needed if point_label is not None: for X, Y, Z in zip(x, y, point_label): ax.annotate('{}'.format(Z), xy=(X, Y), xytext=(-5, 5), ha='right', textcoords='offset points') line_fit, _ = np.polyfit(x, y, 1, full=False, cov=True) fit_fn = np.poly1d(line_fit) ax.plot(x, fit_fn(x), '-', lw=2, color='gray') setp(ax.get_xticklabels(), visible=False) # remove ticks at common border of both plots divider = make_axes_locatable(ax) ax_bottom_plot = divider.append_axes("bottom", 2.0, pad=0.0, sharex=ax) ax_bottom_plot.bar(x, y - fit_fn(x), align='center', width=np.amin(np.diff(x)) / 2, color='gray') # plot(x, y - fit_fn(x)) ax_bottom_plot.grid(True) if x_label is not None: ax.set_xlabel(x_label) if y_2_label is not None: ax.set_ylabel(y_2_label) ax.set_ylim((-np.amax(np.abs(y - fit_fn(x)))), (np.amax(np.abs(y - fit_fn(x))))) ax.plot(ax.set_xlim(), [0, 0], '-', color='black') setp(ax_bottom_plot.get_yticklabels()[-2:-1], visible=False) if size is not None: fig.set_size_inches(size) if not filename: fig.show() elif isinstance(filename, PdfPages): filename.savefig(fig) elif filename: fig.savefig(filename, bbox_inches='tight') return fig
Takes point data (x,y) with errors(x,y) and fits a straight line. The deviation to this line is also plotted, showing the offset. Parameters ---------- x, y, x_err, y_err: iterable filename: string, PdfPages object or None PdfPages file object: plot is appended to the pdf string: new plot file with the given filename is created None: the plot is printed to screen
Below is the the instruction that describes the task: ### Input: Takes point data (x,y) with errors(x,y) and fits a straight line. The deviation to this line is also plotted, showing the offset. Parameters ---------- x, y, x_err, y_err: iterable filename: string, PdfPages object or None PdfPages file object: plot is appended to the pdf string: new plot file with the given filename is created None: the plot is printed to screen ### Response: def plot_linear_relation(x, y, x_err=None, y_err=None, title=None, point_label=None, legend=None, plot_range=None, plot_range_y=None, x_label=None, y_label=None, y_2_label=None, log_x=False, log_y=False, size=None, filename=None): ''' Takes point data (x,y) with errors(x,y) and fits a straight line. The deviation to this line is also plotted, showing the offset. Parameters ---------- x, y, x_err, y_err: iterable filename: string, PdfPages object or None PdfPages file object: plot is appended to the pdf string: new plot file with the given filename is created None: the plot is printed to screen ''' fig = Figure() FigureCanvas(fig) ax = fig.add_subplot(111) if x_err is not None: x_err = [x_err, x_err] if y_err is not None: y_err = [y_err, y_err] ax.set_title(title) if y_label is not None: ax.set_ylabel(y_label) if log_x: ax.set_xscale('log') if log_y: ax.set_yscale('log') if plot_range: ax.set_xlim((min(plot_range), max(plot_range))) if plot_range_y: ax.set_ylim((min(plot_range_y), max(plot_range_y))) if legend: fig.legend(legend, 0) ax.grid(True) ax.errorbar(x, y, xerr=x_err, yerr=y_err, fmt='o', color='black') # plot points # label points if needed if point_label is not None: for X, Y, Z in zip(x, y, point_label): ax.annotate('{}'.format(Z), xy=(X, Y), xytext=(-5, 5), ha='right', textcoords='offset points') line_fit, _ = np.polyfit(x, y, 1, full=False, cov=True) fit_fn = np.poly1d(line_fit) ax.plot(x, fit_fn(x), '-', lw=2, color='gray') setp(ax.get_xticklabels(), visible=False) # remove ticks at common border of both plots divider = make_axes_locatable(ax) ax_bottom_plot = divider.append_axes("bottom", 2.0, pad=0.0, sharex=ax) ax_bottom_plot.bar(x, y - fit_fn(x), align='center', width=np.amin(np.diff(x)) / 2, color='gray') # plot(x, y - fit_fn(x)) ax_bottom_plot.grid(True) if x_label is not None: ax.set_xlabel(x_label) if y_2_label is not None: ax.set_ylabel(y_2_label) ax.set_ylim((-np.amax(np.abs(y - fit_fn(x)))), (np.amax(np.abs(y - fit_fn(x))))) ax.plot(ax.set_xlim(), [0, 0], '-', color='black') setp(ax_bottom_plot.get_yticklabels()[-2:-1], visible=False) if size is not None: fig.set_size_inches(size) if not filename: fig.show() elif isinstance(filename, PdfPages): filename.savefig(fig) elif filename: fig.savefig(filename, bbox_inches='tight') return fig
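The fit-and-residuals core of the record above reduces to a few numpy calls (standalone sketch with made-up data; the original also requests the covariance via `cov=True`):

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.1, 0.9, 2.2, 2.9])
coeffs = np.polyfit(x, y, 1)   # slope and intercept of the best-fit line
fit_fn = np.poly1d(coeffs)
residuals = y - fit_fn(x)      # the offsets drawn in the bottom panel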
def _scheduleCombUpdateDoneEv(self) -> Event: """ Schedule combUpdateDoneEv event to let agents know that current delta step is ending and values from combinational logic are stable """ assert not self._combUpdateDonePlaned, self.now cud = Event(self) cud.process_to_wake.append(self.__deleteCombUpdateDoneEv()) self._add_process(cud, PRIORITY_AGENTS_UPDATE_DONE) self._combUpdateDonePlaned = True self.combUpdateDoneEv = cud return cud
Schedule combUpdateDoneEv event to let agents know that current delta step is ending and values from combinational logic are stable
Below is the the instruction that describes the task: ### Input: Schedule combUpdateDoneEv event to let agents know that current delta step is ending and values from combinational logic are stable ### Response: def _scheduleCombUpdateDoneEv(self) -> Event: """ Schedule combUpdateDoneEv event to let agents know that current delta step is ending and values from combinational logic are stable """ assert not self._combUpdateDonePlaned, self.now cud = Event(self) cud.process_to_wake.append(self.__deleteCombUpdateDoneEv()) self._add_process(cud, PRIORITY_AGENTS_UPDATE_DONE) self._combUpdateDonePlaned = True self.combUpdateDoneEv = cud return cud
def handleResponseEnd(self): """ Extends handleResponseEnd to not care about the user closing/refreshing their browser before the response is finished. Also calls cacheContent in a thread that we don't care when it finishes. """ try: if not self._finished: reactor.callInThread( self.resource.cacheContent, self.father, self._response, self.buffer ) proxy.ProxyClient.handleResponseEnd(self) except RuntimeError: # because we don't care if the user hits # refresh before the request is done pass
Extends handleResponseEnd to not care about the user closing/refreshing their browser before the response is finished. Also calls cacheContent in a thread that we don't care when it finishes.
Below is the the instruction that describes the task: ### Input: Extends handleResponseEnd to not care about the user closing/refreshing their browser before the response is finished. Also calls cacheContent in a thread that we don't care when it finishes. ### Response: def handleResponseEnd(self): """ Extends handleResponseEnd to not care about the user closing/refreshing their browser before the response is finished. Also calls cacheContent in a thread that we don't care when it finishes. """ try: if not self._finished: reactor.callInThread( self.resource.cacheContent, self.father, self._response, self.buffer ) proxy.ProxyClient.handleResponseEnd(self) except RuntimeError: # because we don't care if the user hits # refresh before the request is done pass
def _decrypt(self, value, encrypted_value): """Decrypt encoded cookies """ if sys.platform == 'win32': return self._decrypt_windows_chrome(value, encrypted_value) if value or (encrypted_value[:3] != b'v10'): return value # Encrypted cookies should be prefixed with 'v10' according to the # Chromium code. Strip it off. encrypted_value = encrypted_value[3:] encrypted_value_half_len = int(len(encrypted_value) / 2) cipher = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(self.key, self.iv)) decrypted = cipher.feed(encrypted_value[:encrypted_value_half_len]) decrypted += cipher.feed(encrypted_value[encrypted_value_half_len:]) decrypted += cipher.feed() return decrypted.decode("utf-8")
Decrypt encoded cookies
Below is the the instruction that describes the task: ### Input: Decrypt encoded cookies ### Response: def _decrypt(self, value, encrypted_value): """Decrypt encoded cookies """ if sys.platform == 'win32': return self._decrypt_windows_chrome(value, encrypted_value) if value or (encrypted_value[:3] != b'v10'): return value # Encrypted cookies should be prefixed with 'v10' according to the # Chromium code. Strip it off. encrypted_value = encrypted_value[3:] encrypted_value_half_len = int(len(encrypted_value) / 2) cipher = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(self.key, self.iv)) decrypted = cipher.feed(encrypted_value[:encrypted_value_half_len]) decrypted += cipher.feed(encrypted_value[encrypted_value_half_len:]) decrypted += cipher.feed() return decrypted.decode("utf-8")
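For reference, the `self.key` and `self.iv` consumed above are typically derived from fixed Chromium os_crypt constants; on Linux the parameters are believed to be the following (an assumption about Chromium defaults, not part of this record):

import hashlib

salt = b'saltysalt'
password = b'peanuts'  # Linux fallback when no keyring-provided password exists
key = hashlib.pbkdf2_hmac('sha1', password, salt, 1, dklen=16)
iv = b' ' * 16         # sixteen spaces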
def plot_or_print(my_turbine, e126, dummy_turbine): r""" Plots or prints power output and power (coefficient) curves. Parameters ---------- my_turbine : WindTurbine WindTurbine object with self provided power curve. e126 : WindTurbine WindTurbine object with power curve from data file provided by the windpowerlib. dummy_turbine : WindTurbine WindTurbine object with power coefficient curve from example file. """ # plot or print turbine power output if plt: e126.power_output.plot(legend=True, label='Enercon E126') my_turbine.power_output.plot(legend=True, label='myTurbine') dummy_turbine.power_output.plot(legend=True, label='dummyTurbine') plt.show() else: print(e126.power_output) print(my_turbine.power_output) print(dummy_turbine.power_output) # plot or print power curve if plt: if e126.power_curve is not None: e126.power_curve.plot(x='wind_speed', y='value', style='*', title='Enercon E126 power curve') plt.show() if my_turbine.power_curve is not None: my_turbine.power_curve.plot(x='wind_speed', y='value', style='*', title='myTurbine power curve') plt.show() if dummy_turbine.power_coefficient_curve is not None: dummy_turbine.power_coefficient_curve.plot( x='wind_speed', y='value', style='*', title='dummyTurbine power coefficient curve') plt.show() else: if e126.power_coefficient_curve is not None: print(e126.power_coefficient_curve) if e126.power_curve is not None: print(e126.power_curve)
r""" Plots or prints power output and power (coefficient) curves. Parameters ---------- my_turbine : WindTurbine WindTurbine object with self provided power curve. e126 : WindTurbine WindTurbine object with power curve from data file provided by the windpowerlib. dummy_turbine : WindTurbine WindTurbine object with power coefficient curve from example file.
Below is the the instruction that describes the task: ### Input: r""" Plots or prints power output and power (coefficient) curves. Parameters ---------- my_turbine : WindTurbine WindTurbine object with self provided power curve. e126 : WindTurbine WindTurbine object with power curve from data file provided by the windpowerlib. dummy_turbine : WindTurbine WindTurbine object with power coefficient curve from example file. ### Response: def plot_or_print(my_turbine, e126, dummy_turbine): r""" Plots or prints power output and power (coefficient) curves. Parameters ---------- my_turbine : WindTurbine WindTurbine object with self provided power curve. e126 : WindTurbine WindTurbine object with power curve from data file provided by the windpowerlib. dummy_turbine : WindTurbine WindTurbine object with power coefficient curve from example file. """ # plot or print turbine power output if plt: e126.power_output.plot(legend=True, label='Enercon E126') my_turbine.power_output.plot(legend=True, label='myTurbine') dummy_turbine.power_output.plot(legend=True, label='dummyTurbine') plt.show() else: print(e126.power_output) print(my_turbine.power_output) print(dummy_turbine.power_output) # plot or print power curve if plt: if e126.power_curve is not None: e126.power_curve.plot(x='wind_speed', y='value', style='*', title='Enercon E126 power curve') plt.show() if my_turbine.power_curve is not None: my_turbine.power_curve.plot(x='wind_speed', y='value', style='*', title='myTurbine power curve') plt.show() if dummy_turbine.power_coefficient_curve is not None: dummy_turbine.power_coefficient_curve.plot( x='wind_speed', y='value', style='*', title='dummyTurbine power coefficient curve') plt.show() else: if e126.power_coefficient_curve is not None: print(e126.power_coefficient_curve) if e126.power_curve is not None: print(e126.power_curve)
def get_model_and_form_class(model, form_class): """ Returns a model and form class based on the model and form_class parameters that were passed to the generic view. If ``form_class`` is given then its associated model will be returned along with ``form_class`` itself. Otherwise, if ``model`` is given, ``model`` itself will be returned along with a ``ModelForm`` class created from ``model``. """ if form_class: return form_class._meta.model, form_class if model: # The inner Meta class fails if model = model is used for some reason. tmp_model = model # TODO: we should be able to construct a ModelForm without creating # and passing in a temporary inner class. class Meta: model = tmp_model class_name = model.__name__ + 'Form' form_class = ModelFormMetaclass( class_name, (ModelForm,), {'Meta': Meta}) return model, form_class raise GenericViewError("Generic view must be called with either a model or" " form_class argument.")
Returns a model and form class based on the model and form_class parameters that were passed to the generic view. If ``form_class`` is given then its associated model will be returned along with ``form_class`` itself. Otherwise, if ``model`` is given, ``model`` itself will be returned along with a ``ModelForm`` class created from ``model``.
Below is the the instruction that describes the task: ### Input: Returns a model and form class based on the model and form_class parameters that were passed to the generic view. If ``form_class`` is given then its associated model will be returned along with ``form_class`` itself. Otherwise, if ``model`` is given, ``model`` itself will be returned along with a ``ModelForm`` class created from ``model``. ### Response: def get_model_and_form_class(model, form_class): """ Returns a model and form class based on the model and form_class parameters that were passed to the generic view. If ``form_class`` is given then its associated model will be returned along with ``form_class`` itself. Otherwise, if ``model`` is given, ``model`` itself will be returned along with a ``ModelForm`` class created from ``model``. """ if form_class: return form_class._meta.model, form_class if model: # The inner Meta class fails if model = model is used for some reason. tmp_model = model # TODO: we should be able to construct a ModelForm without creating # and passing in a temporary inner class. class Meta: model = tmp_model class_name = model.__name__ + 'Form' form_class = ModelFormMetaclass( class_name, (ModelForm,), {'Meta': Meta}) return model, form_class raise GenericViewError("Generic view must be called with either a model or" " form_class argument.")
def mongoimport(json, database, ip='localhost', port=27017, user=None, password=None, delim='_', delim1=None, delim2=None, delim_occurance=1, delim1_occurance=1, delim2_occurance=1): ''' Performs mongoimport on one or more json files. Args: json: Can be one of several things: - path to a single JSON file - an iterable (list or tuple) of one or more JSON file paths - path to a directory containing one or more JSON files database (str): Name of the database into which the JSON files will be imported ip (str): IP address of the MongoDB server. Default is ``localhost``. port (int): Port of the MongoDB database. Default is ``27017``. user (str): Username for the MongoDB database, if authentication is enabled. Default is ``None``, which results in attempting connection without authentication. password (str): Password for the MongoDB database, if authentication is enabled. Default is ``None``, which results in attempting connection without authentication. delim (str): Delimiter, when generating collection names using a single delimiter. Default is ``_`` delim_occurance (int): Occurance at which to split filename when using a single delimiter. Default is ``1`` delim1 (str): Left delimiter when splitting with two delimiters. Default is None. delim1_occurance (int): Occurance of ``delim1`` at which to split filename. Default is ``1`` delim2 (str): Right delimiter when splitting with two delimiters. Default is None. delim2_occurance (int): Occurance of ``delim2`` at which to split filename. Default is ``1`` ''' logger = log.get_logger('mongodb') _print_mongoimport_info(logger) if type(json) in (list, tuple): pass elif os.path.isdir(json): from abtools.utils.pipeline import list_files json = list_files(json) else: json = [json, ] jsons = sorted([os.path.expanduser(j) for j in json if j.endswith('.json')]) collections = _get_import_collections(jsons, delim, delim_occurance, delim1, delim1_occurance, delim2, delim2_occurance) logger.info('Found {} files to import'.format(len(jsons))) logger.info('') for i, (json_file, collection) in enumerate(zip(jsons, collections)): logger.info('[ {} ] {} --> {}'.format(i + 1, os.path.basename(json_file), collection)) # logger.info("Performing mongoimport on {}.".format(os.path.basename(json_file))) # logger.info("Importing the file into collection {}.".format(collection)) if all([user, password]): host = '--host {} --port {} -username {} -password {}'.format(ip, port, user, password) else: host = '--host {} --port {}'.format(ip, port) mongo_cmd = "mongoimport {} --db {} --collection {} --file {}".format( host, database, collection, json_file) mongo = sp.Popen(mongo_cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = mongo.communicate()
Performs mongoimport on one or more json files. Args: json: Can be one of several things: - path to a single JSON file - an iterable (list or tuple) of one or more JSON file paths - path to a directory containing one or more JSON files database (str): Name of the database into which the JSON files will be imported ip (str): IP address of the MongoDB server. Default is ``localhost``. port (int): Port of the MongoDB database. Default is ``27017``. user (str): Username for the MongoDB database, if authentication is enabled. Default is ``None``, which results in attempting connection without authentication. password (str): Password for the MongoDB database, if authentication is enabled. Default is ``None``, which results in attempting connection without authentication. delim (str): Delimiter, when generating collection names using a single delimiter. Default is ``_`` delim_occurance (int): Occurance at which to split filename when using a single delimiter. Default is ``1`` delim1 (str): Left delimiter when splitting with two delimiters. Default is None. delim1_occurance (int): Occurance of ``delim1`` at which to split filename. Default is ``1`` delim2 (str): Right delimiter when splitting with two delimiters. Default is None. delim2_occurance (int): Occurance of ``delim2`` at which to split filename. Default is ``1``
Below is the the instruction that describes the task: ### Input: Performs mongoimport on one or more json files. Args: json: Can be one of several things: - path to a single JSON file - an iterable (list or tuple) of one or more JSON file paths - path to a directory containing one or more JSON files database (str): Name of the database into which the JSON files will be imported ip (str): IP address of the MongoDB server. Default is ``localhost``. port (int): Port of the MongoDB database. Default is ``27017``. user (str): Username for the MongoDB database, if authentication is enabled. Default is ``None``, which results in attempting connection without authentication. password (str): Password for the MongoDB database, if authentication is enabled. Default is ``None``, which results in attempting connection without authentication. delim (str): Delimiter, when generating collection names using a single delimiter. Default is ``_`` delim_occurance (int): Occurrence at which to split filename when using a single delimiter. Default is ``1`` delim1 (str): Left delimiter when splitting with two delimiters. Default is None. delim1_occurance (int): Occurrence of ``delim1`` at which to split filename. Default is ``1`` delim2 (str): Right delimiter when splitting with two delimiters. Default is None. delim2_occurance (int): Occurrence of ``delim2`` at which to split filename. Default is ``1`` ### Response: def mongoimport(json, database, ip='localhost', port=27017, user=None, password=None, delim='_', delim1=None, delim2=None, delim_occurance=1, delim1_occurance=1, delim2_occurance=1): ''' Performs mongoimport on one or more json files. Args: json: Can be one of several things: - path to a single JSON file - an iterable (list or tuple) of one or more JSON file paths - path to a directory containing one or more JSON files database (str): Name of the database into which the JSON files will be imported ip (str): IP address of the MongoDB server. Default is ``localhost``. port (int): Port of the MongoDB database. Default is ``27017``. user (str): Username for the MongoDB database, if authentication is enabled. Default is ``None``, which results in attempting connection without authentication. password (str): Password for the MongoDB database, if authentication is enabled. Default is ``None``, which results in attempting connection without authentication. delim (str): Delimiter, when generating collection names using a single delimiter. Default is ``_`` delim_occurance (int): Occurrence at which to split filename when using a single delimiter. Default is ``1`` delim1 (str): Left delimiter when splitting with two delimiters. Default is None. delim1_occurance (int): Occurrence of ``delim1`` at which to split filename. Default is ``1`` delim2 (str): Right delimiter when splitting with two delimiters. Default is None. delim2_occurance (int): Occurrence of ``delim2`` at which to split filename.
Default is ``1`` ''' logger = log.get_logger('mongodb') _print_mongoimport_info(logger) if type(json) in (list, tuple): pass elif os.path.isdir(json): from abtools.utils.pipeline import list_files json = list_files(json) else: json = [json, ] jsons = sorted([os.path.expanduser(j) for j in json if j.endswith('.json')]) collections = _get_import_collections(jsons, delim, delim_occurance, delim1, delim1_occurance, delim2, delim2_occurance) logger.info('Found {} files to import'.format(len(jsons))) logger.info('') for i, (json_file, collection) in enumerate(zip(jsons, collections)): logger.info('[ {} ] {} --> {}'.format(i + 1, os.path.basename(json_file), collection)) # logger.info("Performing mongoimport on {}.".format(os.path.basename(json_file))) # logger.info("Importing the file into collection {}.".format(collection)) if all([user, password]): host = '--host {} --port {} --username {} --password {}'.format(ip, port, user, password) else: host = '--host {} --port {}'.format(ip, port) mongo_cmd = "mongoimport {} --db {} --collection {} --file {}".format( host, database, collection, json_file) mongo = sp.Popen(mongo_cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = mongo.communicate()
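The helper _get_import_collections is referenced but not shown; a minimal sketch of the single-delimiter naming it implies (keep the part of the basename before the Nth occurrence of delim) might look like the following. The function name and exact behavior here are assumptions for illustration, not the library's actual code.

import os

def collection_name(path, delim='_', delim_occurance=1):
    # Hypothetical reconstruction: keep everything before the Nth delimiter.
    stem = os.path.splitext(os.path.basename(path))[0]
    return delim.join(stem.split(delim)[:delim_occurance])

print(collection_name('/data/sample01_heavy_chains.json'))          # sample01
print(collection_name('/data/sample01_heavy_chains.json', '_', 2))  # sample01_heavy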
def index(self, sub, start=None, end=None): """Like S.find() but raise ValueError when the substring is not found. :param str sub: Substring to search. :param int start: Beginning position. :param int end: Stop comparison at this position. """ return self.value_no_colors.index(sub, start, end)
Like S.find() but raise ValueError when the substring is not found. :param str sub: Substring to search. :param int start: Beginning position. :param int end: Stop comparison at this position.
Below is the the instruction that describes the task: ### Input: Like S.find() but raise ValueError when the substring is not found. :param str sub: Substring to search. :param int start: Beginning position. :param int end: Stop comparison at this position. ### Response: def index(self, sub, start=None, end=None): """Like S.find() but raise ValueError when the substring is not found. :param str sub: Substring to search. :param int start: Beginning position. :param int end: Stop comparison at this position. """ return self.value_no_colors.index(sub, start, end)
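A quick behavior sketch on a plain string, since the wrapper above simply delegates to str.index: find signals a miss with -1, while index raises ValueError.

s = "colorless green ideas"
print(s.find("purple"))        # -1, find reports a miss with -1
print(s.index("green"))        # 10
try:
    s.index("purple")
except ValueError as exc:
    print("not found:", exc)   # index raises instead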
def insort_right(a, x, lo=0, hi=None): """Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 a.insert(lo, x)
Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
Below is the the instruction that describes the task: ### Input: Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. ### Response: def insort_right(a, x, lo=0, hi=None): """Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 a.insert(lo, x)
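Usage sketch; the function above mirrors the stdlib bisect.insort_right, which is imported here so the example runs on its own.

from bisect import insort_right

data = [1, 3, 3, 7]
insort_right(data, 3)   # an equal item lands to the RIGHT of existing 3s
insort_right(data, 5)
print(data)             # [1, 3, 3, 3, 5, 7]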
def get_nearest_year_for_day(day): """ Returns the nearest year to now inferred from a Julian date. """ now = time.gmtime() result = now.tm_year # if the day is far greater than today, it must be from last year if day - now.tm_yday > 365 // 2: result -= 1 # if the day is far less than today, it must be for next year. if now.tm_yday - day > 365 // 2: result += 1 return result
Returns the nearest year to now inferred from a Julian date.
Below is the the instruction that describes the task: ### Input: Returns the nearest year to now inferred from a Julian date. ### Response: def get_nearest_year_for_day(day): """ Returns the nearest year to now inferred from a Julian date. """ now = time.gmtime() result = now.tm_year # if the day is far greater than today, it must be from last year if day - now.tm_yday > 365 // 2: result -= 1 # if the day is far less than today, it must be for next year. if now.tm_yday - day > 365 // 2: result += 1 return result
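The half-year heuristic can be checked in isolation. This standalone variant takes "today" as parameters instead of reading the clock, an editorial change for testability; the decision logic is the same.

def nearest_year(day, today_yday, today_year):
    year = today_year
    if day - today_yday > 365 // 2:   # far ahead of today: from last year
        year -= 1
    if today_yday - day > 365 // 2:   # far behind today: for next year
        year += 1
    return year

print(nearest_year(360, 5, 2024))    # 2023: day 360 seen in early January
print(nearest_year(5, 360, 2024))    # 2025: day 5 seen in late December
print(nearest_year(100, 120, 2024))  # 2024: close to today, same year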
def create_version(self, project_id, model_name, version_spec): """ Creates the Version on Google Cloud ML Engine. Returns the operation if the version was created successfully and raises an error otherwise. """ parent_name = 'projects/{}/models/{}'.format(project_id, model_name) create_request = self._mlengine.projects().models().versions().create( parent=parent_name, body=version_spec) response = create_request.execute() get_request = self._mlengine.projects().operations().get( name=response['name']) return _poll_with_exponential_delay( request=get_request, max_n=9, is_done_func=lambda resp: resp.get('done', False), is_error_func=lambda resp: resp.get('error', None) is not None)
Creates the Version on Google Cloud ML Engine. Returns the operation if the version was created successfully and raises an error otherwise.
Below is the the instruction that describes the task: ### Input: Creates the Version on Google Cloud ML Engine. Returns the operation if the version was created successfully and raises an error otherwise. ### Response: def create_version(self, project_id, model_name, version_spec): """ Creates the Version on Google Cloud ML Engine. Returns the operation if the version was created successfully and raises an error otherwise. """ parent_name = 'projects/{}/models/{}'.format(project_id, model_name) create_request = self._mlengine.projects().models().versions().create( parent=parent_name, body=version_spec) response = create_request.execute() get_request = self._mlengine.projects().operations().get( name=response['name']) return _poll_with_exponential_delay( request=get_request, max_n=9, is_done_func=lambda resp: resp.get('done', False), is_error_func=lambda resp: resp.get('error', None) is not None)
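_poll_with_exponential_delay is referenced but not shown. A self-contained sketch of that polling idea follows; the name, signature, and exact backoff schedule are assumptions, not the actual helper.

import time

def poll_with_exponential_delay(request, max_n, is_done_func, is_error_func):
    for i in range(max_n):
        resp = request()                      # stands in for request.execute()
        if is_error_func(resp):
            raise RuntimeError(resp['error'])
        if is_done_func(resp):
            return resp
        time.sleep(2 ** i)                    # 1s, 2s, 4s, ...
    raise TimeoutError('operation did not complete')

responses = iter([{'done': False}, {'done': True, 'name': 'op/1'}])
print(poll_with_exponential_delay(lambda: next(responses), 9,
                                  lambda r: r.get('done', False),
                                  lambda r: r.get('error') is not None))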
def find_users(self, *args): """ Returns the users to search given names as args. Return all users if there are no args provided. """ if args: names = reduce(lambda query, arg: query | (Q(first_name__icontains=arg) | Q(last_name__icontains=arg)), args, Q()) # noqa users = User.objects.filter(names) # If no args given, check every user else: users = User.objects.all() # Display errors if no user was found if not users.count() and args: if len(args) == 1: raise CommandError('No user was found with the name %s' % args[0]) else: arg_list = ', '.join(args) raise CommandError('No users found with the names: %s' % arg_list) return users
Returns the users to search given names as args. Return all users if there are no args provided.
Below is the the instruction that describes the task: ### Input: Returns the users to search given names as args. Return all users if there are no args provided. ### Response: def find_users(self, *args): """ Returns the users to search given names as args. Return all users if there are no args provided. """ if args: names = reduce(lambda query, arg: query | (Q(first_name__icontains=arg) | Q(last_name__icontains=arg)), args, Q()) # noqa users = User.objects.filter(names) # If no args given, check every user else: users = User.objects.all() # Display errors if no user was found if not users.count() and args: if len(args) == 1: raise CommandError('No user was found with the name %s' % args[0]) else: arg_list = ', '.join(args) raise CommandError('No users found with the names: %s' % arg_list) return users
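The reduce-over-Q pattern can be shown without Django; here plain predicates stand in for Q objects and the OR-composition works the same way.

from functools import reduce
import operator

def any_name_matches(terms):
    # One case-insensitive containment predicate per term, OR-ed together.
    preds = [lambda name, t=t: t.lower() in name.lower() for t in terms]
    return lambda name: reduce(operator.or_, (p(name) for p in preds), False)

users = ['Ada Lovelace', 'Alan Turing', 'Grace Hopper']
match = any_name_matches(['ada', 'hopper'])
print([u for u in users if match(u)])   # ['Ada Lovelace', 'Grace Hopper']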
def _nxapi_request(commands, method='cli_conf', **kwargs): ''' Executes an nxapi_request request over NX-API. commands The exec or config commands to be sent. method: ``cli_show`` ``cli_show_ascii``: Return raw text or unstructured output. ``cli_show``: Return structured output. ``cli_conf``: Send configuration commands to the device. Defaults to ``cli_conf``. ''' if CONNECTION == 'ssh': return '_nxapi_request is not available for ssh proxy' conn_args = DEVICE_DETAILS['conn_args'] conn_args.update(kwargs) data = __utils__['nxos.nxapi_request'](commands, method=method, **conn_args) return data
Executes an nxapi_request request over NX-API. commands The exec or config commands to be sent. method: ``cli_show`` ``cli_show_ascii``: Return raw text or unstructured output. ``cli_show``: Return structured output. ``cli_conf``: Send configuration commands to the device. Defaults to ``cli_conf``.
Below is the the instruction that describes the task: ### Input: Executes an nxapi_request request over NX-API. commands The exec or config commands to be sent. method: ``cli_show`` ``cli_show_ascii``: Return raw text or unstructured output. ``cli_show``: Return structured output. ``cli_conf``: Send configuration commands to the device. Defaults to ``cli_conf``. ### Response: def _nxapi_request(commands, method='cli_conf', **kwargs): ''' Executes an nxapi_request request over NX-API. commands The exec or config commands to be sent. method: ``cli_show`` ``cli_show_ascii``: Return raw text or unstructured output. ``cli_show``: Return structured output. ``cli_conf``: Send configuration commands to the device. Defaults to ``cli_conf``. ''' if CONNECTION == 'ssh': return '_nxapi_request is not available for ssh proxy' conn_args = DEVICE_DETAILS['conn_args'] conn_args.update(kwargs) data = __utils__['nxos.nxapi_request'](commands, method=method, **conn_args) return data
def _transactional(self, method, *argv, **argd): """ Begins a transaction and calls the given DAO method. If the method executes successfully the transaction is committed. If the method fails, the transaction is rolled back. @type method: callable @param method: Bound method of this class or one of its subclasses. The first argument will always be C{self}. @return: The return value of the method call. @raise Exception: Any exception raised by the method. """ self._session.begin(subtransactions = True) try: result = method(self, *argv, **argd) self._session.commit() return result except: self._session.rollback() raise
Begins a transaction and calls the given DAO method. If the method executes successfully the transaction is committed. If the method fails, the transaction is rolled back. @type method: callable @param method: Bound method of this class or one of its subclasses. The first argument will always be C{self}. @return: The return value of the method call. @raise Exception: Any exception raised by the method.
Below is the the instruction that describes the task: ### Input: Begins a transaction and calls the given DAO method. If the method executes successfully the transaction is committed. If the method fails, the transaction is rolled back. @type method: callable @param method: Bound method of this class or one of its subclasses. The first argument will always be C{self}. @return: The return value of the method call. @raise Exception: Any exception raised by the method. ### Response: def _transactional(self, method, *argv, **argd): """ Begins a transaction and calls the given DAO method. If the method executes successfully the transaction is committed. If the method fails, the transaction is rolled back. @type method: callable @param method: Bound method of this class or one of its subclasses. The first argument will always be C{self}. @return: The return value of the method call. @raise Exception: Any exception raised by the method. """ self._session.begin(subtransactions = True) try: result = method(self, *argv, **argd) self._session.commit() return result except: self._session.rollback() raise
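The same commit-or-rollback wrapper is often written as a decorator. A sketch with a fake session follows; the only assumption is the begin/commit/rollback interface.

import functools

def transactional(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        self._session.begin(subtransactions=True)
        try:
            result = method(self, *args, **kwargs)
            self._session.commit()
            return result
        except Exception:
            self._session.rollback()
            raise
    return wrapper

class FakeSession:
    def begin(self, subtransactions=False): print('begin')
    def commit(self): print('commit')
    def rollback(self): print('rollback')

class DAO:
    def __init__(self): self._session = FakeSession()
    @transactional
    def save(self, value): return value

print(DAO().save(42))   # begin / commit / 42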
def _observers_for_notification(self, ntype, sender): """Find all registered observers that should receive notification""" keys = ( (ntype,sender), (ntype, None), (None, sender), (None,None) ) obs = set() for k in keys: obs.update(self.observers.get(k, set())) return obs
Find all registered observers that should receive notification
Below is the the instruction that describes the task: ### Input: Find all registered observers that should receive notification ### Response: def _observers_for_notification(self, ntype, sender): """Find all registered observers that should receive notification""" keys = ( (ntype,sender), (ntype, None), (None, sender), (None,None) ) obs = set() for k in keys: obs.update(self.observers.get(k, set())) return obs
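A minimal sketch of the (type, sender) fan-out, with None acting as a wildcard in either slot; the observer registry is just a dict of sets, and the function above is assumed to be in scope at module level so it can be attached as a method.

class Center:
    def __init__(self):
        self.observers = {}

Center._observers_for_notification = _observers_for_notification  # method above

c = Center()
c.observers[('saved', None)] = {'type_observer'}   # any sender, type 'saved'
c.observers[(None, None)] = {'catch_all'}          # any sender, any type
print(c._observers_for_notification('saved', 'doc1'))
# {'type_observer', 'catch_all'}; an exact ('saved', 'doc1') key would match too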
def ftp_url(self): # type: () -> Text """Get the FTP url this filesystem will open.""" url = ( "ftp://{}".format(self.host) if self.port == 21 else "ftp://{}:{}".format(self.host, self.port) ) return url
Get the FTP url this filesystem will open.
Below is the the instruction that describes the task: ### Input: Get the FTP url this filesystem will open. ### Response: def ftp_url(self): # type: () -> Text """Get the FTP url this filesystem will open.""" url = ( "ftp://{}".format(self.host) if self.port == 21 else "ftp://{}:{}".format(self.host, self.port) ) return url
def get_chinese_new_year(self, year): """ Compute Chinese New Year days. To return a list of holidays. By default, it'll at least return the Chinese New Year holidays chosen using the following options: * ``include_chinese_new_year_eve`` * ``include_chinese_new_year`` (on by default) * ``include_chinese_second_day`` If the ``shift_sunday_holidays`` option is on, the rules are the following. * If the CNY1 falls on MON-FRI, there's not shift. * If the CNY1 falls on SAT, the CNY2 is shifted to the Monday after. * If the CNY1 falls on SUN, the CNY1 is shifted to the Monday after, and CNY2 is shifted to the Tuesday after. """ days = [] lunar_first_day = ChineseNewYearCalendar.lunar(year, 1, 1) # Chinese new year's eve if self.include_chinese_new_year_eve: days.append(( lunar_first_day - timedelta(days=1), self.chinese_new_year_eve_label )) # Chinese new year (is included by default) if self.include_chinese_new_year: days.append((lunar_first_day, self.chinese_new_year_label)) if self.include_chinese_second_day: lunar_second_day = lunar_first_day + timedelta(days=1) days.append(( lunar_second_day, self.chinese_second_day_label )) if self.include_chinese_third_day: lunar_third_day = lunar_first_day + timedelta(days=2) days.append(( lunar_third_day, self.chinese_third_day_label )) if self.shift_sunday_holidays: if lunar_first_day.weekday() == SUN: if self.shift_start_cny_sunday: days.append( (lunar_first_day - timedelta(days=1), "Chinese Lunar New Year shift"), ) else: if self.include_chinese_third_day: shift_day = lunar_third_day else: shift_day = lunar_second_day days.append( (shift_day + timedelta(days=1), "Chinese Lunar New Year shift"), ) if (lunar_second_day.weekday() == SUN and self.include_chinese_third_day): days.append( (lunar_third_day + timedelta(days=1), "Chinese Lunar New Year shift"), ) return days
Compute Chinese New Year days. To return a list of holidays. By default, it'll at least return the Chinese New Year holidays chosen using the following options: * ``include_chinese_new_year_eve`` * ``include_chinese_new_year`` (on by default) * ``include_chinese_second_day`` If the ``shift_sunday_holidays`` option is on, the rules are the following. * If the CNY1 falls on MON-FRI, there's not shift. * If the CNY1 falls on SAT, the CNY2 is shifted to the Monday after. * If the CNY1 falls on SUN, the CNY1 is shifted to the Monday after, and CNY2 is shifted to the Tuesday after.
Below is the the instruction that describes the task: ### Input: Compute Chinese New Year days. To return a list of holidays. By default, it'll at least return the Chinese New Year holidays chosen using the following options: * ``include_chinese_new_year_eve`` * ``include_chinese_new_year`` (on by default) * ``include_chinese_second_day`` If the ``shift_sunday_holidays`` option is on, the rules are the following. * If the CNY1 falls on MON-FRI, there's not shift. * If the CNY1 falls on SAT, the CNY2 is shifted to the Monday after. * If the CNY1 falls on SUN, the CNY1 is shifted to the Monday after, and CNY2 is shifted to the Tuesday after. ### Response: def get_chinese_new_year(self, year): """ Compute Chinese New Year days. To return a list of holidays. By default, it'll at least return the Chinese New Year holidays chosen using the following options: * ``include_chinese_new_year_eve`` * ``include_chinese_new_year`` (on by default) * ``include_chinese_second_day`` If the ``shift_sunday_holidays`` option is on, the rules are the following. * If the CNY1 falls on MON-FRI, there's not shift. * If the CNY1 falls on SAT, the CNY2 is shifted to the Monday after. * If the CNY1 falls on SUN, the CNY1 is shifted to the Monday after, and CNY2 is shifted to the Tuesday after. """ days = [] lunar_first_day = ChineseNewYearCalendar.lunar(year, 1, 1) # Chinese new year's eve if self.include_chinese_new_year_eve: days.append(( lunar_first_day - timedelta(days=1), self.chinese_new_year_eve_label )) # Chinese new year (is included by default) if self.include_chinese_new_year: days.append((lunar_first_day, self.chinese_new_year_label)) if self.include_chinese_second_day: lunar_second_day = lunar_first_day + timedelta(days=1) days.append(( lunar_second_day, self.chinese_second_day_label )) if self.include_chinese_third_day: lunar_third_day = lunar_first_day + timedelta(days=2) days.append(( lunar_third_day, self.chinese_third_day_label )) if self.shift_sunday_holidays: if lunar_first_day.weekday() == SUN: if self.shift_start_cny_sunday: days.append( (lunar_first_day - timedelta(days=1), "Chinese Lunar New Year shift"), ) else: if self.include_chinese_third_day: shift_day = lunar_third_day else: shift_day = lunar_second_day days.append( (shift_day + timedelta(days=1), "Chinese Lunar New Year shift"), ) if (lunar_second_day.weekday() == SUN and self.include_chinese_third_day): days.append( (lunar_third_day + timedelta(days=1), "Chinese Lunar New Year shift"), ) return days
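The docstring's Sunday-shift rules in isolation, using plain datetime.date. This is a simplification: the method itself appends extra "shift" holidays rather than moving dates, and also honors the include_* option flags.

from datetime import date, timedelta

SUN = 6   # date.weekday(): Monday == 0 ... Sunday == 6

def shifted_cny_days(cny1):
    cny2 = cny1 + timedelta(days=1)
    days = [cny1, cny2]
    if cny1.weekday() == SUN:                  # CNY1 on Sunday: shift both
        days = [d + timedelta(days=1) for d in days]
    elif cny2.weekday() == SUN:                # CNY1 on Saturday: CNY2 -> Monday
        days[1] = cny2 + timedelta(days=1)
    return days

print(shifted_cny_days(date(2023, 1, 22)))     # 2023-01-22 was a Sunday
print(shifted_cny_days(date(2023, 1, 21)))     # a Saturday, for illustration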
def compare_and_set(self, expect, update): ''' Atomically sets the value to `update` if the current value is equal to `expect`. :param expect: The expected current value. :param update: The value to set if and only if `expect` equals the current value. ''' with self._lock.exclusive: if self._value == expect: self._value = update return True return False
Atomically sets the value to `update` if the current value is equal to `expect`. :param expect: The expected current value. :param update: The value to set if and only if `expect` equals the current value.
Below is the the instruction that describes the task: ### Input: Atomically sets the value to `update` if the current value is equal to `expect`. :param expect: The expected current value. :param update: The value to set if and only if `expect` equals the current value. ### Response: def compare_and_set(self, expect, update): ''' Atomically sets the value to `update` if the current value is equal to `expect`. :param expect: The expected current value. :param update: The value to set if and only if `expect` equals the current value. ''' with self._lock.exclusive: if self._value == expect: self._value = update return True return False
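Classic CAS retry loop; a plain threading.Lock stands in for the _lock.exclusive context manager the class above assumes.

import threading

class AtomicValue:
    def __init__(self, value):
        self._value = value
        self._lock = threading.Lock()

    def get(self):
        with self._lock:
            return self._value

    def compare_and_set(self, expect, update):
        with self._lock:
            if self._value == expect:
                self._value = update
                return True
            return False

v = AtomicValue(0)
while True:                      # read, compute, then CAS; retry on conflict
    cur = v.get()
    if v.compare_and_set(cur, cur + 1):
        break
print(v.get())                   # 1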
def add_or_update(self, app_id, value): ''' Adding or updating the evaluation. :param app_id: the ID of the post. :param value: the evaluation :return: in JSON format. ''' MEvaluation.add_or_update(self.userinfo.uid, app_id, value) out_dic = { 'eval0': MEvaluation.app_evaluation_count(app_id, 0), 'eval1': MEvaluation.app_evaluation_count(app_id, 1) } return json.dump(out_dic, self)
Adding or updating the evaluation. :param app_id: the ID of the post. :param value: the evaluation :return: in JSON format.
Below is the the instruction that describes the task: ### Input: Adding or updating the evaluation. :param app_id: the ID of the post. :param value: the evaluation :return: in JSON format. ### Response: def add_or_update(self, app_id, value): ''' Adding or updating the evaluation. :param app_id: the ID of the post. :param value: the evaluation :return: in JSON format. ''' MEvaluation.add_or_update(self.userinfo.uid, app_id, value) out_dic = { 'eval0': MEvaluation.app_evaluation_count(app_id, 0), 'eval1': MEvaluation.app_evaluation_count(app_id, 1) } return json.dump(out_dic, self)
def set(self, obj, build_kwargs): """Set cached value.""" if build_kwargs is None: build_kwargs = {} cached = {} if 'queryset' in build_kwargs: cached = { 'model': build_kwargs['queryset'].model, 'pks': list(build_kwargs['queryset'].values_list('pk', flat=True)), } elif 'obj' in build_kwargs: cached = { 'obj': build_kwargs['obj'], } if not hasattr(self._thread_local, 'cache'): self._thread_local.cache = {} self._thread_local.cache[self._get_cache_key(obj)] = cached
Set cached value.
Below is the the instruction that describes the task: ### Input: Set cached value. ### Response: def set(self, obj, build_kwargs): """Set cached value.""" if build_kwargs is None: build_kwargs = {} cached = {} if 'queryset' in build_kwargs: cached = { 'model': build_kwargs['queryset'].model, 'pks': list(build_kwargs['queryset'].values_list('pk', flat=True)), } elif 'obj' in build_kwargs: cached = { 'obj': build_kwargs['obj'], } if not hasattr(self._thread_local, 'cache'): self._thread_local.cache = {} self._thread_local.cache[self._get_cache_key(obj)] = cached
def play_audio(filename: str): """ Args: filename: Audio filename """ import platform from subprocess import Popen player = 'play' if platform.system() == 'Darwin' else 'aplay' Popen([player, '-q', filename])
Args: filename: Audio filename
Below is the the instruction that describes the task: ### Input: Args: filename: Audio filename ### Response: def play_audio(filename: str): """ Args: filename: Audio filename """ import platform from subprocess import Popen player = 'play' if platform.system() == 'Darwin' else 'aplay' Popen([player, '-q', filename])
def kruskal_align(U, V, permute_U=False, permute_V=False): """Aligns two KTensors and returns a similarity score. Parameters ---------- U : KTensor First kruskal tensor to align. V : KTensor Second kruskal tensor to align. permute_U : bool If True, modifies 'U' to align the KTensors (default is False). permute_V : bool If True, modifies 'V' to align the KTensors (default is False). Notes ----- If both `permute_U` and `permute_V` are both set to True, then the factors are ordered from most to least similar. If only one is True then the factors on the modified KTensor are re-ordered to match the factors in the un-aligned KTensor. Returns ------- similarity : float Similarity score between zero and one. """ # Compute similarity matrices. unrm = [f / np.linalg.norm(f, axis=0) for f in U.factors] vnrm = [f / np.linalg.norm(f, axis=0) for f in V.factors] sim_matrices = [np.dot(u.T, v) for u, v in zip(unrm, vnrm)] cost = 1 - np.mean(np.abs(sim_matrices), axis=0) # Solve matching problem via Hungarian algorithm. indices = Munkres().compute(cost.copy()) prmU, prmV = zip(*indices) # Compute mean factor similarity given the optimal matching. similarity = np.mean(1 - cost[prmU, prmV]) # If U and V are of different ranks, identify unmatched factors. unmatched_U = list(set(range(U.rank)) - set(prmU)) unmatched_V = list(set(range(V.rank)) - set(prmV)) # If permuting both U and V, order factors from most to least similar. if permute_U and permute_V: idx = np.argsort(cost[prmU, prmV]) # If permute_U is False, then order the factors such that the ordering # for U is unchanged. elif permute_V: idx = np.argsort(prmU) # If permute_V is False, then order the factors such that the ordering # for V is unchanged. elif permute_U: idx = np.argsort(prmV) # If permute_U and permute_V are both False, then we are done and can # simply return the similarity. else: return similarity # Re-order the factor permutations. prmU = [prmU[i] for i in idx] prmV = [prmV[i] for i in idx] # Permute the factors. if permute_U: U.permute(prmU) if permute_V: V.permute(prmV) # Flip the signs of factors. flips = np.sign([F[prmU, prmV] for F in sim_matrices]) flips[0] *= np.prod(flips, axis=0) # always flip an even number of factors if permute_U: for i, f in enumerate(flips): U.factors[i] *= f elif permute_V: for i, f in enumerate(flips): V.factors[i] *= f # Return the similarity score return similarity
Aligns two KTensors and returns a similarity score. Parameters ---------- U : KTensor First kruskal tensor to align. V : KTensor Second kruskal tensor to align. permute_U : bool If True, modifies 'U' to align the KTensors (default is False). permute_V : bool If True, modifies 'V' to align the KTensors (default is False). Notes ----- If both `permute_U` and `permute_V` are both set to True, then the factors are ordered from most to least similar. If only one is True then the factors on the modified KTensor are re-ordered to match the factors in the un-aligned KTensor. Returns ------- similarity : float Similarity score between zero and one.
Below is the the instruction that describes the task: ### Input: Aligns two KTensors and returns a similarity score. Parameters ---------- U : KTensor First kruskal tensor to align. V : KTensor Second kruskal tensor to align. permute_U : bool If True, modifies 'U' to align the KTensors (default is False). permute_V : bool If True, modifies 'V' to align the KTensors (default is False). Notes ----- If both `permute_U` and `permute_V` are both set to True, then the factors are ordered from most to least similar. If only one is True then the factors on the modified KTensor are re-ordered to match the factors in the un-aligned KTensor. Returns ------- similarity : float Similarity score between zero and one. ### Response: def kruskal_align(U, V, permute_U=False, permute_V=False): """Aligns two KTensors and returns a similarity score. Parameters ---------- U : KTensor First kruskal tensor to align. V : KTensor Second kruskal tensor to align. permute_U : bool If True, modifies 'U' to align the KTensors (default is False). permute_V : bool If True, modifies 'V' to align the KTensors (default is False). Notes ----- If both `permute_U` and `permute_V` are both set to True, then the factors are ordered from most to least similar. If only one is True then the factors on the modified KTensor are re-ordered to match the factors in the un-aligned KTensor. Returns ------- similarity : float Similarity score between zero and one. """ # Compute similarity matrices. unrm = [f / np.linalg.norm(f, axis=0) for f in U.factors] vnrm = [f / np.linalg.norm(f, axis=0) for f in V.factors] sim_matrices = [np.dot(u.T, v) for u, v in zip(unrm, vnrm)] cost = 1 - np.mean(np.abs(sim_matrices), axis=0) # Solve matching problem via Hungarian algorithm. indices = Munkres().compute(cost.copy()) prmU, prmV = zip(*indices) # Compute mean factor similarity given the optimal matching. similarity = np.mean(1 - cost[prmU, prmV]) # If U and V are of different ranks, identify unmatched factors. unmatched_U = list(set(range(U.rank)) - set(prmU)) unmatched_V = list(set(range(V.rank)) - set(prmV)) # If permuting both U and V, order factors from most to least similar. if permute_U and permute_V: idx = np.argsort(cost[prmU, prmV]) # If permute_U is False, then order the factors such that the ordering # for U is unchanged. elif permute_V: idx = np.argsort(prmU) # If permute_V is False, then order the factors such that the ordering # for V is unchanged. elif permute_U: idx = np.argsort(prmV) # If permute_U and permute_V are both False, then we are done and can # simply return the similarity. else: return similarity # Re-order the factor permutations. prmU = [prmU[i] for i in idx] prmV = [prmV[i] for i in idx] # Permute the factors. if permute_U: U.permute(prmU) if permute_V: V.permute(prmV) # Flip the signs of factors. flips = np.sign([F[prmU, prmV] for F in sim_matrices]) flips[0] *= np.prod(flips, axis=0) # always flip an even number of factors if permute_U: for i, f in enumerate(flips): U.factors[i] *= f elif permute_V: for i, f in enumerate(flips): V.factors[i] *= f # Return the similarity score return similarity
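The matching step can be exercised on synthetic factors. scipy's linear_sum_assignment stands in for the Munkres solver, and the factor matrices here are made up for the demo.

import numpy as np
from scipy.optimize import linear_sum_assignment

rng = np.random.default_rng(0)
U = [rng.normal(size=(20, 3)) for _ in range(2)]     # two modes, rank 3
V = [f[:, [2, 0, 1]] for f in U]                     # column-permuted copy of U

# Mean absolute-cosine cost across modes, exactly as in kruskal_align.
unrm = [f / np.linalg.norm(f, axis=0) for f in U]
vnrm = [f / np.linalg.norm(f, axis=0) for f in V]
cost = 1 - np.mean([np.abs(u.T @ v) for u, v in zip(unrm, vnrm)], axis=0)

rows, cols = linear_sum_assignment(cost)
print(cols)                                   # [1 2 0]: recovers the permutation
print(round(1 - cost[rows, cols].mean(), 6))  # 1.0, perfectly aligned factors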
def sql(self, stmt, parameters=None, bulk_parameters=None): """ Execute SQL stmt against the crate server. """ if stmt is None: return None data = _create_sql_payload(stmt, parameters, bulk_parameters) logger.debug( 'Sending request to %s with payload: %s', self.path, data) content = self._json_request('POST', self.path, data=data) logger.debug("JSON response for stmt(%s): %s", stmt, content) return content
Execute SQL stmt against the crate server.
Below is the the instruction that describes the task: ### Input: Execute SQL stmt against the crate server. ### Response: def sql(self, stmt, parameters=None, bulk_parameters=None): """ Execute SQL stmt against the crate server. """ if stmt is None: return None data = _create_sql_payload(stmt, parameters, bulk_parameters) logger.debug( 'Sending request to %s with payload: %s', self.path, data) content = self._json_request('POST', self.path, data=data) logger.debug("JSON response for stmt(%s): %s", stmt, content) return content
def realpred(cls, lemma, pos, sense=None): """Instantiate a Pred from its components.""" string_tokens = [lemma] if pos is not None: string_tokens.append(pos) if sense is not None: sense = str(sense) string_tokens.append(sense) predstr = '_'.join([''] + string_tokens + ['rel']) return cls(Pred.REALPRED, lemma, pos, sense, predstr)
Instantiate a Pred from its components.
Below is the the instruction that describes the task: ### Input: Instantiate a Pred from its components. ### Response: def realpred(cls, lemma, pos, sense=None): """Instantiate a Pred from its components.""" string_tokens = [lemma] if pos is not None: string_tokens.append(pos) if sense is not None: sense = str(sense) string_tokens.append(sense) predstr = '_'.join([''] + string_tokens + ['rel']) return cls(Pred.REALPRED, lemma, pos, sense, predstr)
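The string-building rule on its own, as a plain function with the classmethod wiring and the Pred.REALPRED constant stripped out.

def realpred_string(lemma, pos, sense=None):
    tokens = [lemma]
    if pos is not None:
        tokens.append(pos)
    if sense is not None:
        tokens.append(str(sense))
    # Leading '' yields the initial underscore; 'rel' is the fixed suffix.
    return '_'.join([''] + tokens + ['rel'])

print(realpred_string('dog', 'n', 1))   # _dog_n_1_rel
print(realpred_string('bark', 'v'))     # _bark_v_rel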
def allow_relation(self, obj1, obj2, **hints): """ Relations between objects are allowed between nodeshot2 objects only """ if obj1._meta.app_label != 'oldimporter' and obj2._meta.app_label != 'oldimporter': return True return None
Relations between objects are allowed between nodeshot2 objects only
Below is the the instruction that describes the task: ### Input: Relations between objects are allowed between nodeshot2 objects only ### Response: def allow_relation(self, obj1, obj2, **hints): """ Relations between objects are allowed between nodeshot2 objects only """ if obj1._meta.app_label != 'oldimporter' and obj2._meta.app_label != 'oldimporter': return True return None
def get_user_and_check_auth(self, username, password): """Check that the username/password combination is valid in the database. """ constraint = sql.or_( models.USERS.c.name == username, models.USERS.c.email == username ) user = self.identity_from_db(models.USERS, constraint) if user is None: raise dci_exc.DCIException('User %s does not exist.' % username, status_code=401) return user, auth.check_passwords_equal(password, user.password)
Check that the username/password combination is valid in the database.
Below is the the instruction that describes the task: ### Input: Check that the username/password combination is valid in the database. ### Response: def get_user_and_check_auth(self, username, password): """Check that the username/password combination is valid in the database. """ constraint = sql.or_( models.USERS.c.name == username, models.USERS.c.email == username ) user = self.identity_from_db(models.USERS, constraint) if user is None: raise dci_exc.DCIException('User %s does not exist.' % username, status_code=401) return user, auth.check_passwords_equal(password, user.password)
def _set_up_pool_config(self): ''' Helper to configure pool options during DatabaseWrapper initialization. ''' self._max_conns = self.settings_dict['OPTIONS'].get('MAX_CONNS', pool_config_defaults['MAX_CONNS']) self._min_conns = self.settings_dict['OPTIONS'].get('MIN_CONNS', self._max_conns) self._test_on_borrow = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW', pool_config_defaults['TEST_ON_BORROW']) if self._test_on_borrow: self._test_on_borrow_query = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW_QUERY', pool_config_defaults['TEST_ON_BORROW_QUERY']) else: self._test_on_borrow_query = None
Helper to configure pool options during DatabaseWrapper initialization.
Below is the the instruction that describes the task: ### Input: Helper to configure pool options during DatabaseWrapper initialization. ### Response: def _set_up_pool_config(self): ''' Helper to configure pool options during DatabaseWrapper initialization. ''' self._max_conns = self.settings_dict['OPTIONS'].get('MAX_CONNS', pool_config_defaults['MAX_CONNS']) self._min_conns = self.settings_dict['OPTIONS'].get('MIN_CONNS', self._max_conns) self._test_on_borrow = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW', pool_config_defaults['TEST_ON_BORROW']) if self._test_on_borrow: self._test_on_borrow_query = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW_QUERY', pool_config_defaults['TEST_ON_BORROW_QUERY']) else: self._test_on_borrow_query = None
def remove_mock(self, mock): """ Removes a specific mock instance by object reference. Arguments: mock (pook.Mock): mock instance to remove. """ self.mocks = [m for m in self.mocks if m is not mock]
Removes a specific mock instance by object reference. Arguments: mock (pook.Mock): mock instance to remove.
Below is the the instruction that describes the task: ### Input: Removes a specific mock instance by object reference. Arguments: mock (pook.Mock): mock instance to remove. ### Response: def remove_mock(self, mock): """ Removes a specific mock instance by object reference. Arguments: mock (pook.Mock): mock instance to remove. """ self.mocks = [m for m in self.mocks if m is not mock]
def wrap_content(content, settings, hard_breaks=False): """ Returns *content* wrapped in a HTML structure. If *hard_breaks* is set, line breaks are converted to `<br />` tags. """ settings.context['content'] = wrap_paragraphs(content, hard_breaks) template = Template(settings.template) try: return template.render(**settings.context) except KeyError as error: msg = "missing context setting: {}".format(error) raise ContextError(msg)
Returns *content* wrapped in a HTML structure. If *hard_breaks* is set, line breaks are converted to `<br />` tags.
Below is the the instruction that describes the task: ### Input: Returns *content* wrapped in a HTML structure. If *hard_breaks* is set, line breaks are converted to `<br />` tags. ### Response: def wrap_content(content, settings, hard_breaks=False): """ Returns *content* wrapped in a HTML structure. If *hard_breaks* is set, line breaks are converted to `<br />` tags. """ settings.context['content'] = wrap_paragraphs(content, hard_breaks) template = Template(settings.template) try: return template.render(**settings.context) except KeyError as error: msg = "missing context setting: {}".format(error) raise ContextError(msg)
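A runnable stand-in using string.Template. The Template/render pair above suggests an engine such as Jinja2 or mako; substitute raises KeyError on a missing placeholder, which is exactly the failure mode the except clause catches.

from string import Template

template_src = '<html><body>$content<footer>$author</footer></body></html>'
context = {'content': '<p>hello</p>'}    # 'author' deliberately missing
try:
    print(Template(template_src).substitute(**context))
except KeyError as error:
    print('missing context setting: {}'.format(error))   # 'author'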
def isdisjoint(self, other): """Return True if the set has no elements in common with other.""" return not bool(self.db.sinter([self.key, other.key]))
Return True if the set has no elements in common with other.
Below is the the instruction that describes the task: ### Input: Return True if the set has no elements in common with other. ### Response: def isdisjoint(self, other): """Return True if the set has no elements in common with other.""" return not bool(self.db.sinter([self.key, other.key]))
def addValue(self, protocolElement): """ Appends the specified protocolElement to the value list for this response. """ self._numElements += 1 self._bufferSize += protocolElement.ByteSize() attr = getattr(self._protoObject, self._valueListName) obj = attr.add() obj.CopyFrom(protocolElement)
Appends the specified protocolElement to the value list for this response.
Below is the the instruction that describes the task: ### Input: Appends the specified protocolElement to the value list for this response. ### Response: def addValue(self, protocolElement): """ Appends the specified protocolElement to the value list for this response. """ self._numElements += 1 self._bufferSize += protocolElement.ByteSize() attr = getattr(self._protoObject, self._valueListName) obj = attr.add() obj.CopyFrom(protocolElement)
def get_occurrence(event_id, occurrence_id=None, year=None, month=None, day=None, hour=None, minute=None, second=None, tzinfo=None): """ Because occurrences don't have to be persisted, there must be two ways to retrieve them. Both need an event, but if it's persisted the occurrence can be retrieved with an id. If it is not persisted, it takes a date to retrieve it. This function returns an event and occurrence regardless of which method is used. """ if(occurrence_id): occurrence = get_object_or_404(Occurrence, id=occurrence_id) event = occurrence.event elif None not in (year, month, day, hour, minute, second): event = get_object_or_404(Event, id=event_id) date = timezone.make_aware(datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second)), tzinfo) occurrence = event.get_occurrence(date) if occurrence is None: raise Http404 else: raise Http404 return event, occurrence
Because occurrences don't have to be persisted, there must be two ways to retrieve them. Both need an event, but if it's persisted the occurrence can be retrieved with an id. If it is not persisted, it takes a date to retrieve it. This function returns an event and occurrence regardless of which method is used.
Below is the the instruction that describes the task: ### Input: Because occurrences don't have to be persisted, there must be two ways to retrieve them. Both need an event, but if it's persisted the occurrence can be retrieved with an id. If it is not persisted, it takes a date to retrieve it. This function returns an event and occurrence regardless of which method is used. ### Response: def get_occurrence(event_id, occurrence_id=None, year=None, month=None, day=None, hour=None, minute=None, second=None, tzinfo=None): """ Because occurrences don't have to be persisted, there must be two ways to retrieve them. Both need an event, but if it's persisted the occurrence can be retrieved with an id. If it is not persisted, it takes a date to retrieve it. This function returns an event and occurrence regardless of which method is used. """ if(occurrence_id): occurrence = get_object_or_404(Occurrence, id=occurrence_id) event = occurrence.event elif None not in (year, month, day, hour, minute, second): event = get_object_or_404(Event, id=event_id) date = timezone.make_aware(datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second)), tzinfo) occurrence = event.get_occurrence(date) if occurrence is None: raise Http404 else: raise Http404 return event, occurrence
def get_source_metadata(self): """Gets the metadata for the source. return: (osid.Metadata) - metadata for the source *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['source']) metadata.update({'existing_id_values': self._my_map['sourceId']}) return Metadata(**metadata)
Gets the metadata for the source. return: (osid.Metadata) - metadata for the source *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Gets the metadata for the source. return: (osid.Metadata) - metadata for the source *compliance: mandatory -- This method must be implemented.* ### Response: def get_source_metadata(self): """Gets the metadata for the source. return: (osid.Metadata) - metadata for the source *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['source']) metadata.update({'existing_id_values': self._my_map['sourceId']}) return Metadata(**metadata)
def get_local_tzone(): """Get the current time zone on the local host""" if localtime().tm_isdst: if altzone < 0: tzone = '+' + \ str(int(float(altzone) / 60 // 60)).rjust(2, '0') + \ str(int(float( altzone) / 60 % 60)).ljust(2, '0') else: tzone = '-' + \ str(int(float(altzone) / 60 // 60)).rjust(2, '0') + \ str(int(float( altzone) / 60 % 60)).ljust(2, '0') else: if timezone < 0: tzone = \ '+' + str(int(float(timezone) / 60 // 60)).rjust(2, '0') + \ str(int(float( timezone) / 60 % 60)).ljust(2, '0') else: tzone = \ '-' + str(int(float(timezone) / 60 // 60)).rjust(2, '0') + \ str(int(float( timezone) / 60 % 60)).ljust(2, '0') return tzone
Get the current time zone on the local host
Below is the the instruction that describes the task: ### Input: Get the current time zone on the local host ### Response: def get_local_tzone(): """Get the current time zone on the local host""" if localtime().tm_isdst: if altzone < 0: tzone = '+' + \ str(int(float(altzone) / 60 // 60)).rjust(2, '0') + \ str(int(float( altzone) / 60 % 60)).ljust(2, '0') else: tzone = '-' + \ str(int(float(altzone) / 60 // 60)).rjust(2, '0') + \ str(int(float( altzone) / 60 % 60)).ljust(2, '0') else: if timezone < 0: tzone = \ '+' + str(int(float(timezone) / 60 // 60)).rjust(2, '0') + \ str(int(float( timezone) / 60 % 60)).ljust(2, '0') else: tzone = \ '-' + str(int(float(timezone) / 60 // 60)).rjust(2, '0') + \ str(int(float( timezone) / 60 % 60)).ljust(2, '0') return tzone
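A cleaner equivalent of the offset arithmetic: time.timezone and time.altzone are seconds WEST of UTC, so a negative value means east of UTC and gets a '+' sign. Taking abs() before splitting avoids the floor-division sign quirks of the hand-rolled version above on non-whole-hour offsets.

def fmt_offset(seconds_west):
    sign = '+' if seconds_west < 0 else '-'
    minutes = abs(seconds_west) // 60
    return '{}{:02d}{:02d}'.format(sign, minutes // 60, minutes % 60)

print(fmt_offset(-7200))    # +0200
print(fmt_offset(18000))    # -0500
print(fmt_offset(-19800))   # +0530, half-hour offsets survive intact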
def relayIndextoCoord(self, i): """ Map 1D cell index to a 2D coordinate :param i: integer 1D cell index :return: (x, y), a 2D coordinate """ x = i % self.relayWidth y = i // self.relayWidth return x, y
Map 1D cell index to a 2D coordinate :param i: integer 1D cell index :return: (x, y), a 2D coordinate
Below is the the instruction that describes the task: ### Input: Map 1D cell index to a 2D coordinate :param i: integer 1D cell index :return: (x, y), a 2D coordinate ### Response: def relayIndextoCoord(self, i): """ Map 1D cell index to a 2D coordinate :param i: integer 1D cell index :return: (x, y), a 2D coordinate """ x = i % self.relayWidth y = i // self.relayWidth return x, y
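Row-major round trip on a hypothetical 4-wide relay; integer division keeps y an int, which is why // is the right operator above under Python 3.

relay_width = 4

def index_to_coord(i, width=relay_width):
    return i % width, i // width   # column from the remainder, row from the quotient

for i in (0, 3, 4, 9):
    print(i, index_to_coord(i))    # (0,0) (3,0) (0,1) (1,2)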
def tree_view_keypress_callback(self, widget, event): """Tab back and forward tab-key motion in list widget and the scrollbar motion to follow key cursor motions The method introduce motion and edit functionality by using "tab"- or "shift-tab"-key for a Gtk.TreeView. It is designed to work with a Gtk.TreeView which model is a Gtk.ListStore and only uses text cell renderer. Additional, the TreeView is assumed to be used as a list not as a tree. With the "tab"-key the cell on the right site of the actual focused cell is started to be edit. Changes in the Gtk.Entry-Widget are confirmed by emitting a 'edited'-signal. If the row ends the edit process continues with the first cell of the next row. With the "shift-tab"-key the inverse functionality of the "tab"-key is provided. The Controller over steps not editable cells. :param Gtk.TreeView widget: The tree view the controller use :param Gdk.Event event: The key press event :return: """ # self._logger.info("key_value: " + str(event.keyval if event is not None else '')) if event and "GDK_KEY_PRESS" == event.type.value_name \ and (event.keyval == Gdk.KEY_Tab or event.keyval == Gdk.KEY_ISO_Left_Tab): [path, focus_column] = self.tree_view.get_cursor() if not path: return False self.tree_view_keypress_callback.__func__.core_element_id = self.store[path][self.ID_STORAGE_ID] # finish active edit process if self.active_entry_widget is not None: text = self.active_entry_widget.get_buffer().get_text() if focus_column in self.widget_columns: focus_column.get_cells()[0].emit('edited', path[0], text) # row could be updated by other call_backs caused by emitting 'edited' signal but selection stays an editable neighbor path = self.get_path_for_core_element(self.tree_view_keypress_callback.__func__.core_element_id) if event.keyval == Gdk.KEY_Tab: # logger.info("move right") direction = +1 else: # logger.info("move left") direction = -1 # get next row_id for focus if direction < 0 and focus_column is self.widget_columns[0] \ or direction > 0 and focus_column is self.widget_columns[-1]: if direction < 0 < path[0] or direction > 0 and not path[0] + 1 > len(self.store): next_row = path[0] + direction else: return False else: next_row = path[0] # get next column_id for focus focus_column_id = self.widget_columns.index(focus_column) if focus_column_id is not None: # search all columns for next editable cell renderer next_focus_column_id = 0 for index in range(len(self.tree_view.get_model())): test_id = focus_column_id + direction * index + direction next_focus_column_id = test_id % len(self.widget_columns) if test_id > len(self.widget_columns) - 1 or test_id < 0: next_row = path[0] + direction if next_row < 0 or next_row > len(self.tree_view.get_model()) - 1: return False if self.widget_columns[next_focus_column_id].get_cells()[0].get_property('editable'): break else: return False del self.tree_view_keypress_callback.__func__.core_element_id # self._logger.info("self.tree_view.scroll_to_cell(next_row={0}, self.widget_columns[{1}] , use_align={2})" # "".format(next_row, next_focus_column_id, False)) # self.tree_view.scroll_to_cell(next_row, self.widget_columns[next_focus_column_id], use_align=False) self.tree_view.set_cursor_on_cell(Gtk.TreePath.new_from_indices([next_row]), self.widget_columns[ next_focus_column_id], focus_cell=None, start_editing=True) return True else: super(ListViewController, self).tree_view_keypress_callback(widget, event)
Tab back and forward tab-key motion in list widget and the scrollbar motion to follow key cursor motions The method introduce motion and edit functionality by using "tab"- or "shift-tab"-key for a Gtk.TreeView. It is designed to work with a Gtk.TreeView which model is a Gtk.ListStore and only uses text cell renderer. Additional, the TreeView is assumed to be used as a list not as a tree. With the "tab"-key the cell on the right site of the actual focused cell is started to be edit. Changes in the Gtk.Entry-Widget are confirmed by emitting a 'edited'-signal. If the row ends the edit process continues with the first cell of the next row. With the "shift-tab"-key the inverse functionality of the "tab"-key is provided. The Controller over steps not editable cells. :param Gtk.TreeView widget: The tree view the controller use :param Gdk.Event event: The key press event :return:
Below is the the instruction that describes the task: ### Input: Tab back and forward tab-key motion in list widget and the scrollbar motion to follow key cursor motions The method introduce motion and edit functionality by using "tab"- or "shift-tab"-key for a Gtk.TreeView. It is designed to work with a Gtk.TreeView which model is a Gtk.ListStore and only uses text cell renderer. Additional, the TreeView is assumed to be used as a list not as a tree. With the "tab"-key the cell on the right site of the actual focused cell is started to be edit. Changes in the Gtk.Entry-Widget are confirmed by emitting a 'edited'-signal. If the row ends the edit process continues with the first cell of the next row. With the "shift-tab"-key the inverse functionality of the "tab"-key is provided. The Controller over steps not editable cells. :param Gtk.TreeView widget: The tree view the controller use :param Gdk.Event event: The key press event :return: ### Response: def tree_view_keypress_callback(self, widget, event): """Tab back and forward tab-key motion in list widget and the scrollbar motion to follow key cursor motions The method introduce motion and edit functionality by using "tab"- or "shift-tab"-key for a Gtk.TreeView. It is designed to work with a Gtk.TreeView which model is a Gtk.ListStore and only uses text cell renderer. Additional, the TreeView is assumed to be used as a list not as a tree. With the "tab"-key the cell on the right site of the actual focused cell is started to be edit. Changes in the Gtk.Entry-Widget are confirmed by emitting a 'edited'-signal. If the row ends the edit process continues with the first cell of the next row. With the "shift-tab"-key the inverse functionality of the "tab"-key is provided. The Controller over steps not editable cells. 
:param Gtk.TreeView widget: The tree view the controller use :param Gdk.Event event: The key press event :return: """ # self._logger.info("key_value: " + str(event.keyval if event is not None else '')) if event and "GDK_KEY_PRESS" == event.type.value_name \ and (event.keyval == Gdk.KEY_Tab or event.keyval == Gdk.KEY_ISO_Left_Tab): [path, focus_column] = self.tree_view.get_cursor() if not path: return False self.tree_view_keypress_callback.__func__.core_element_id = self.store[path][self.ID_STORAGE_ID] # finish active edit process if self.active_entry_widget is not None: text = self.active_entry_widget.get_buffer().get_text() if focus_column in self.widget_columns: focus_column.get_cells()[0].emit('edited', path[0], text) # row could be updated by other call_backs caused by emitting 'edited' signal but selection stays an editable neighbor path = self.get_path_for_core_element(self.tree_view_keypress_callback.__func__.core_element_id) if event.keyval == Gdk.KEY_Tab: # logger.info("move right") direction = +1 else: # logger.info("move left") direction = -1 # get next row_id for focus if direction < 0 and focus_column is self.widget_columns[0] \ or direction > 0 and focus_column is self.widget_columns[-1]: if direction < 0 < path[0] or direction > 0 and not path[0] + 1 > len(self.store): next_row = path[0] + direction else: return False else: next_row = path[0] # get next column_id for focus focus_column_id = self.widget_columns.index(focus_column) if focus_column_id is not None: # search all columns for next editable cell renderer next_focus_column_id = 0 for index in range(len(self.tree_view.get_model())): test_id = focus_column_id + direction * index + direction next_focus_column_id = test_id % len(self.widget_columns) if test_id > len(self.widget_columns) - 1 or test_id < 0: next_row = path[0] + direction if next_row < 0 or next_row > len(self.tree_view.get_model()) - 1: return False if self.widget_columns[next_focus_column_id].get_cells()[0].get_property('editable'): break else: return False del self.tree_view_keypress_callback.__func__.core_element_id # self._logger.info("self.tree_view.scroll_to_cell(next_row={0}, self.widget_columns[{1}] , use_align={2})" # "".format(next_row, next_focus_column_id, False)) # self.tree_view.scroll_to_cell(next_row, self.widget_columns[next_focus_column_id], use_align=False) self.tree_view.set_cursor_on_cell(Gtk.TreePath.new_from_indices([next_row]), self.widget_columns[ next_focus_column_id], focus_cell=None, start_editing=True) return True else: super(ListViewController, self).tree_view_keypress_callback(widget, event)
def prepareToRemove(self): """ Handles any code that needs to run to cleanup the connection \ before it gets removed from the scene. :return <bool> success """ # disconnect the signals from the input and output nodes for node in (self._outputNode, self._inputNode): self.disconnectSignals(node) # clear the pointers to the nodes self._inputNode = None self._outputNode = None return True
Handles any code that needs to run to cleanup the connection \ before it gets removed from the scene. :return <bool> success
Below is the the instruction that describes the task: ### Input: Handles any code that needs to run to cleanup the connection \ before it gets removed from the scene. :return <bool> success ### Response: def prepareToRemove(self): """ Handles any code that needs to run to cleanup the connection \ before it gets removed from the scene. :return <bool> success """ # disconnect the signals from the input and output nodes for node in (self._outputNode, self._inputNode): self.disconnectSignals(node) # clear the pointers to the nodes self._inputNode = None self._outputNode = None return True
def updateAnomalyLikelihoods(anomalyScores, params, verbosity=0): """ Compute updated probabilities for anomalyScores using the given params. :param anomalyScores: a list of records. Each record is a list with the following three elements: [timestamp, value, score] Example:: [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0] :param params: the JSON dict returned by estimateAnomalyLikelihoods :param verbosity: integer controlling extent of printouts for debugging :type verbosity: int :returns: 3-tuple consisting of: - likelihoods numpy array of likelihoods, one for each aggregated point - avgRecordList list of averaged input records - params an updated JSON object containing the state of this metric. """ if verbosity > 3: print("In updateAnomalyLikelihoods.") print("Number of anomaly scores:", len(anomalyScores)) print("First 20:", anomalyScores[0:min(20, len(anomalyScores))]) print("Params:", params) if len(anomalyScores) == 0: raise ValueError("Must have at least one anomalyScore") if not isValidEstimatorParams(params): raise ValueError("'params' is not a valid params structure") # For backward compatibility. if "historicalLikelihoods" not in params: params["historicalLikelihoods"] = [1.0] # Compute moving averages of these new scores using the previous values # as well as likelihood for these scores using the old estimator historicalValues = params["movingAverage"]["historicalValues"] total = params["movingAverage"]["total"] windowSize = params["movingAverage"]["windowSize"] aggRecordList = numpy.zeros(len(anomalyScores), dtype=float) likelihoods = numpy.zeros(len(anomalyScores), dtype=float) for i, v in enumerate(anomalyScores): newAverage, historicalValues, total = ( MovingAverage.compute(historicalValues, total, v[2], windowSize) ) aggRecordList[i] = newAverage likelihoods[i] = tailProbability(newAverage, params["distribution"]) # Filter the likelihood values. First we prepend the historical likelihoods # to the current set. Then we filter the values. We peel off the likelihoods # to return and the last windowSize values to store for later. likelihoods2 = params["historicalLikelihoods"] + list(likelihoods) filteredLikelihoods = _filterLikelihoods(likelihoods2) likelihoods[:] = filteredLikelihoods[-len(likelihoods):] historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):] # Update the estimator newParams = { "distribution": params["distribution"], "movingAverage": { "historicalValues": historicalValues, "total": total, "windowSize": windowSize, }, "historicalLikelihoods": historicalLikelihoods, } assert len(newParams["historicalLikelihoods"]) <= windowSize if verbosity > 3: print("Number of likelihoods:", len(likelihoods)) print("First 20 likelihoods:", likelihoods[0:min(20, len(likelihoods))]) print("Leaving updateAnomalyLikelihoods.") return (likelihoods, aggRecordList, newParams)
Compute updated probabilities for anomalyScores using the given params. :param anomalyScores: a list of records. Each record is a list with the following three elements: [timestamp, value, score] Example:: [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0] :param params: the JSON dict returned by estimateAnomalyLikelihoods :param verbosity: integer controlling extent of printouts for debugging :type verbosity: int :returns: 3-tuple consisting of: - likelihoods numpy array of likelihoods, one for each aggregated point - avgRecordList list of averaged input records - params an updated JSON object containing the state of this metric.
Below is the instruction that describes the task: ### Input: Compute updated probabilities for anomalyScores using the given params. :param anomalyScores: a list of records. Each record is a list with the following three elements: [timestamp, value, score] Example:: [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0] :param params: the JSON dict returned by estimateAnomalyLikelihoods :param verbosity: integer controlling extent of printouts for debugging :type verbosity: int :returns: 3-tuple consisting of: - likelihoods numpy array of likelihoods, one for each aggregated point - avgRecordList list of averaged input records - params an updated JSON object containing the state of this metric. ### Response: def updateAnomalyLikelihoods(anomalyScores, params, verbosity=0): """ Compute updated probabilities for anomalyScores using the given params. :param anomalyScores: a list of records. Each record is a list with the following three elements: [timestamp, value, score] Example:: [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0] :param params: the JSON dict returned by estimateAnomalyLikelihoods :param verbosity: integer controlling extent of printouts for debugging :type verbosity: int :returns: 3-tuple consisting of: - likelihoods numpy array of likelihoods, one for each aggregated point - avgRecordList list of averaged input records - params an updated JSON object containing the state of this metric. """ if verbosity > 3: print("In updateAnomalyLikelihoods.") print("Number of anomaly scores:", len(anomalyScores)) print("First 20:", anomalyScores[0:min(20, len(anomalyScores))]) print("Params:", params) if len(anomalyScores) == 0: raise ValueError("Must have at least one anomalyScore") if not isValidEstimatorParams(params): raise ValueError("'params' is not a valid params structure") # For backward compatibility. if "historicalLikelihoods" not in params: params["historicalLikelihoods"] = [1.0] # Compute moving averages of these new scores using the previous values # as well as likelihood for these scores using the old estimator historicalValues = params["movingAverage"]["historicalValues"] total = params["movingAverage"]["total"] windowSize = params["movingAverage"]["windowSize"] aggRecordList = numpy.zeros(len(anomalyScores), dtype=float) likelihoods = numpy.zeros(len(anomalyScores), dtype=float) for i, v in enumerate(anomalyScores): newAverage, historicalValues, total = ( MovingAverage.compute(historicalValues, total, v[2], windowSize) ) aggRecordList[i] = newAverage likelihoods[i] = tailProbability(newAverage, params["distribution"]) # Filter the likelihood values. First we prepend the historical likelihoods # to the current set. Then we filter the values. We peel off the likelihoods # to return and the last windowSize values to store for later. likelihoods2 = params["historicalLikelihoods"] + list(likelihoods) filteredLikelihoods = _filterLikelihoods(likelihoods2) likelihoods[:] = filteredLikelihoods[-len(likelihoods):] historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):] # Update the estimator newParams = { "distribution": params["distribution"], "movingAverage": { "historicalValues": historicalValues, "total": total, "windowSize": windowSize, }, "historicalLikelihoods": historicalLikelihoods, } assert len(newParams["historicalLikelihoods"]) <= windowSize if verbosity > 3: print("Number of likelihoods:", len(likelihoods)) print("First 20 likelihoods:", likelihoods[0:min(20, len(likelihoods))]) print("Leaving updateAnomalyLikelihoods.") return (likelihoods, aggRecordList, newParams)
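A minimal usage sketch for the function above, assuming the companion estimateAnomalyLikelihoods (named in the docstring) lives in the same module and returns (likelihoods, avgRecordList, params) for an initial batch; the record layout follows the docstring's [timestamp, value, score] example:

import datetime

# Hypothetical warm-up batch of [timestamp, value, anomalyScore] records.
batch = [[datetime.datetime(2013, 8, 10, 23, 0) + datetime.timedelta(minutes=5 * i),
          6.0 + i, 0.1 * (i % 10)] for i in range(100)]
_, _, params = estimateAnomalyLikelihoods(batch)  # assumed companion estimator

# Stream later records incrementally, carrying the updated params forward.
new_scores = [[datetime.datetime(2013, 8, 11, 8, 0), 42.0, 0.9]]
likelihoods, avg_records, params = updateAnomalyLikelihoods(new_scores, params)
print(likelihoods)  # one filtered likelihood per new record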
def request(self, method, url, url_params=empty.dict, headers=empty.dict, timeout=None, **params): """Calls the service at the specified URL using the "CALL" method""" raise NotImplementedError("Concrete services must define the request method")
Calls the service at the specified URL using the "CALL" method
Below is the instruction that describes the task: ### Input: Calls the service at the specified URL using the "CALL" method ### Response: def request(self, method, url, url_params=empty.dict, headers=empty.dict, timeout=None, **params): """Calls the service at the specified URL using the "CALL" method""" raise NotImplementedError("Concrete services must define the request method")
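Because request is only an abstract hook, a concrete subclass has to supply the transport. A hedged sketch of a hypothetical subclass backed by the requests library (the Service base-class name is an assumption; only the signature above is given):

import requests

class HTTPService(Service):  # hypothetical base class owning the stub above
    def request(self, method, url, url_params=None, headers=None, timeout=None, **params):
        # Map the generic call onto an HTTP request; extra params become the body.
        return requests.request(method, url, params=url_params or {},
                                headers=headers or {}, timeout=timeout,
                                data=params or None)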
def generate(env): """Add Builders and construction variables for the OS/2 to an Environment.""" cc.generate(env) env['CC'] = 'icc' env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET' env['CXXCOM'] = '$CXX $CXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET' env['CPPDEFPREFIX'] = '/D' env['CPPDEFSUFFIX'] = '' env['INCPREFIX'] = '/I' env['INCSUFFIX'] = '' env['CFILESUFFIX'] = '.c' env['CXXFILESUFFIX'] = '.cc'
Add Builders and construction variables for the OS/2 to an Environment.
Below is the instruction that describes the task: ### Input: Add Builders and construction variables for the OS/2 to an Environment. ### Response: def generate(env): """Add Builders and construction variables for the OS/2 to an Environment.""" cc.generate(env) env['CC'] = 'icc' env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET' env['CXXCOM'] = '$CXX $CXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET' env['CPPDEFPREFIX'] = '/D' env['CPPDEFSUFFIX'] = '' env['INCPREFIX'] = '/I' env['INCSUFFIX'] = '' env['CFILESUFFIX'] = '.c' env['CXXFILESUFFIX'] = '.cc'
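A sketch of wiring the tool into a build, assuming the generate function above is saved as icc.py under site_scons/site_tools/ so SCons can discover it by name (a tool module usually also defines an exists(env) check alongside generate):

# SConstruct
env = Environment(tools=['default', 'icc'])  # pulls in generate(env) from icc.py
env.Program('hello', ['hello.c'])            # compiled with the icc command lines set above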
def get_contract(firma, pravni_forma, sidlo, ic, dic, zastoupen): """ Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file. """ contract_fn = _resource_context( "Licencni_smlouva_o_dodavani_elektronickych_publikaci" "_a_jejich_uziti.rst" ) # load contract with open(contract_fn) as f: contract = f.read()#.decode("utf-8").encode("utf-8") # make sure that `firma` has its heading mark firma = firma.strip() firma = firma + "\n" + ((len(firma) + 1) * "-") # patch template contract = Template(contract).substitute( firma=firma, pravni_forma=pravni_forma.strip(), sidlo=sidlo.strip(), ic=ic.strip(), dic=dic.strip(), zastoupen=zastoupen.strip(), resources_path=RES_PATH ) return gen_pdf( contract, open(_resource_context("style.json")).read(), )
Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file.
Below is the instruction that describes the task: ### Input: Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file. ### Response: def get_contract(firma, pravni_forma, sidlo, ic, dic, zastoupen): """ Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file. """ contract_fn = _resource_context( "Licencni_smlouva_o_dodavani_elektronickych_publikaci" "_a_jejich_uziti.rst" ) # load contract with open(contract_fn) as f: contract = f.read()#.decode("utf-8").encode("utf-8") # make sure that `firma` has its heading mark firma = firma.strip() firma = firma + "\n" + ((len(firma) + 1) * "-") # patch template contract = Template(contract).substitute( firma=firma, pravni_forma=pravni_forma.strip(), sidlo=sidlo.strip(), ic=ic.strip(), dic=dic.strip(), zastoupen=zastoupen.strip(), resources_path=RES_PATH ) return gen_pdf( contract, open(_resource_context("style.json")).read(), )
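A usage sketch with placeholder company details; per the docstring the return value is an in-memory PDF, written to disk here:

pdf_file = get_contract(
    firma="Example s.r.o.",
    pravni_forma="spolecnost s rucenim omezenym",
    sidlo="Prikopy 1, Praha 1",
    ic="12345678",
    dic="CZ12345678",
    zastoupen="Jan Novak",
)
with open("smlouva.pdf", "wb") as out:
    out.write(pdf_file.read())  # pdf_file is a StringIO-like instance per the docstring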
def reschedule(self, date, callable_name=None, content_object=None, expires='7d', args=None, kwargs=None): """Schedule a clone of this job.""" # Resolve date relative to the expected start of the current job. if isinstance(date, basestring): date = parse_timedelta(date) if isinstance(date, datetime.timedelta): date = self.time_slot_start + date if callable_name is None: callable_name = self.callable_name if content_object is None: content_object = self.content_object if args is None: args = self.args or [] if kwargs is None: kwargs = self.kwargs or {} from django_future import schedule_job return schedule_job(date, callable_name, content_object=content_object, expires=expires, args=args, kwargs=kwargs)
Schedule a clone of this job.
Below is the instruction that describes the task: ### Input: Schedule a clone of this job. ### Response: def reschedule(self, date, callable_name=None, content_object=None, expires='7d', args=None, kwargs=None): """Schedule a clone of this job.""" # Resolve date relative to the expected start of the current job. if isinstance(date, basestring): date = parse_timedelta(date) if isinstance(date, datetime.timedelta): date = self.time_slot_start + date if callable_name is None: callable_name = self.callable_name if content_object is None: content_object = self.content_object if args is None: args = self.args or [] if kwargs is None: kwargs = self.kwargs or {} from django_future import schedule_job return schedule_job(date, callable_name, content_object=content_object, expires=expires, args=args, kwargs=kwargs)
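A short sketch of both relative and absolute rescheduling; the '1d' shorthand relies on the same parse_timedelta helper the method itself calls:

# `job` is assumed to be an existing scheduled-job instance.
clone = job.reschedule('1d')  # one day after the current job's time slot

import datetime
clone = job.reschedule(datetime.datetime(2024, 1, 1, 9, 0),
                       kwargs={'retry': True})  # absolute date, overridden kwargs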
def suggestion_list(input_: str, options: Collection[str]): """Get list with suggestions for a given input. Given an invalid input string and list of valid options, returns a filtered list of valid options sorted based on their similarity with the input. """ options_by_distance = {} input_threshold = len(input_) // 2 for option in options: distance = lexical_distance(input_, option) threshold = max(input_threshold, len(option) // 2, 1) if distance <= threshold: options_by_distance[option] = distance return sorted(options_by_distance, key=options_by_distance.get)
Get list with suggestions for a given input. Given an invalid input string and list of valid options, returns a filtered list of valid options sorted based on their similarity with the input.
Below is the instruction that describes the task: ### Input: Get list with suggestions for a given input. Given an invalid input string and list of valid options, returns a filtered list of valid options sorted based on their similarity with the input. ### Response: def suggestion_list(input_: str, options: Collection[str]): """Get list with suggestions for a given input. Given an invalid input string and list of valid options, returns a filtered list of valid options sorted based on their similarity with the input. """ options_by_distance = {} input_threshold = len(input_) // 2 for option in options: distance = lexical_distance(input_, option) threshold = max(input_threshold, len(option) // 2, 1) if distance <= threshold: options_by_distance[option] = distance return sorted(options_by_distance, key=options_by_distance.get)
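A worked example; lexical_distance is not part of the snippet, so a plain Levenshtein edit distance is substituted below purely for demonstration:

def lexical_distance(a: str, b: str) -> int:
    # Hypothetical stand-in: classic dynamic-programming edit distance.
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

print(suggestion_list("strign", ["string", "strict", "number", "strong"]))
# ['string', 'strict', 'strong'] -- "number" exceeds its threshold and is dropped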
def t_ID(self, t): r'[a-zA-Z]+' if t.value in self._RESERVED.keys(): t.type = self._RESERVED[t.value] return t if Information.is_valid_symbol(t.value) or \ Information.is_valid_category(t.value): t.type = self._INFORMATION_UNIT return t if Duration.is_valid_symbol(t.value): t.type = self._DURATION_UNIT return t raise LexingError('Unrecognised token or unit \'{0.value}\' at ' 'position {0.lexpos}'.format(t))
r'[a-zA-Z]+'
Below is the instruction that describes the task: ### Input: r'[a-zA-Z]+' ### Response: def t_ID(self, t): r'[a-zA-Z]+' if t.value in self._RESERVED.keys(): t.type = self._RESERVED[t.value] return t if Information.is_valid_symbol(t.value) or \ Information.is_valid_category(t.value): t.type = self._INFORMATION_UNIT return t if Duration.is_valid_symbol(t.value): t.type = self._DURATION_UNIT return t raise LexingError('Unrecognised token or unit \'{0.value}\' at ' 'position {0.lexpos}'.format(t))
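PLY reads the raw-string docstring r'[a-zA-Z]+' as the token's regular expression. A stripped-down standalone lexer showing the same convention (token names here are hypothetical, not the ones from the class above):

import ply.lex as lex

tokens = ('WORD', 'NUMBER')

def t_WORD(t):
    r'[a-zA-Z]+'
    return t

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

t_ignore = ' \t'

def t_error(t):
    raise SyntaxError('illegal character %r' % t.value[0])

lexer = lex.lex()
lexer.input('10 MB')
print([tok.type for tok in lexer])  # ['NUMBER', 'WORD']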
def get_user_events(self, id, **data): """ GET /users/:id/events/ Returns a :ref:`paginated <pagination>` response of :format:`events <event>`, under the key ``events``, of all events the user has access to """ return self.get("/users/{0}/events/".format(id), data=data)
GET /users/:id/events/ Returns a :ref:`paginated <pagination>` response of :format:`events <event>`, under the key ``events``, of all events the user has access to
Below is the instruction that describes the task: ### Input: GET /users/:id/events/ Returns a :ref:`paginated <pagination>` response of :format:`events <event>`, under the key ``events``, of all events the user has access to ### Response: def get_user_events(self, id, **data): """ GET /users/:id/events/ Returns a :ref:`paginated <pagination>` response of :format:`events <event>`, under the key ``events``, of all events the user has access to """ return self.get("/users/{0}/events/".format(id), data=data)
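A hedged call sketch, assuming client is an authenticated API-client instance exposing the method above; the event field names below are assumptions about the response shape:

page = client.get_user_events('me', page=1)  # extra kwargs pass through as query data
for event in page['events']:
    print(event['id'], event.get('name'))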
def serialize_header(self, pyobj, typecode=None, **kw): '''Serialize a Python object in SOAP-ENV:Header, make sure everything in Header unique (no #href). Must call serialize first to create a document. Parameters: pyobjs -- instances to serialize in SOAP Header typecode -- default typecode ''' kw['unique'] = True soap_env = _reserved_ns['SOAP-ENV'] #header = self.dom.getElement(soap_env, 'Header') header = self._header if header is None: header = self._header = self.dom.createAppendElement(soap_env, 'Header') typecode = getattr(pyobj, 'typecode', typecode) if typecode is None: raise RuntimeError( 'typecode is required to serialize pyobj in header') helt = typecode.serialize(header, self, pyobj, **kw)
Serialize a Python object in SOAP-ENV:Header, make sure everything in Header unique (no #href). Must call serialize first to create a document. Parameters: pyobjs -- instances to serialize in SOAP Header typecode -- default typecode
Below is the instruction that describes the task: ### Input: Serialize a Python object in SOAP-ENV:Header, make sure everything in Header unique (no #href). Must call serialize first to create a document. Parameters: pyobjs -- instances to serialize in SOAP Header typecode -- default typecode ### Response: def serialize_header(self, pyobj, typecode=None, **kw): '''Serialize a Python object in SOAP-ENV:Header, make sure everything in Header unique (no #href). Must call serialize first to create a document. Parameters: pyobjs -- instances to serialize in SOAP Header typecode -- default typecode ''' kw['unique'] = True soap_env = _reserved_ns['SOAP-ENV'] #header = self.dom.getElement(soap_env, 'Header') header = self._header if header is None: header = self._header = self.dom.createAppendElement(soap_env, 'Header') typecode = getattr(pyobj, 'typecode', typecode) if typecode is None: raise RuntimeError( 'typecode is required to serialize pyobj in header') helt = typecode.serialize(header, self, pyobj, **kw)
def match(self, set_a, set_b): """ For each step in each track from set_a, identify all steps in all tracks from set_b that meet all cost function criteria Args: set_a: List of STObjects set_b: List of STObjects Returns: track_pairings: pandas.DataFrame """ track_step_matches = [[] * len(set_a)] costs = self.cost_matrix(set_a, set_b) valid_costs = np.all(costs < 1, axis=2) set_a_matches, set_b_matches = np.where(valid_costs) s = 0 track_pairings = pd.DataFrame(index=np.arange(costs.shape[0]), columns=["Track", "Step", "Time", "Matched", "Pairings"], dtype=object) set_b_info = [] for trb, track_b in enumerate(set_b): for t, time in enumerate(track_b.times): set_b_info.append((trb, t)) set_b_info_arr = np.array(set_b_info, dtype=int) for tr, track_a in enumerate(set_a): for t, time in enumerate(track_a.times): track_pairings.loc[s, ["Track", "Step", "Time"]] = [tr, t, time] track_pairings.loc[s, "Matched"] = 1 if np.count_nonzero(set_a_matches == s) > 0 else 0 if track_pairings.loc[s, "Matched"] == 1: track_pairings.loc[s, "Pairings"] = set_b_info_arr[set_b_matches[set_a_matches == s]] else: track_pairings.loc[s, "Pairings"] = np.array([]) s += 1 return track_pairings
For each step in each track from set_a, identify all steps in all tracks from set_b that meet all cost function criteria Args: set_a: List of STObjects set_b: List of STObjects Returns: track_pairings: pandas.DataFrame
Below is the instruction that describes the task: ### Input: For each step in each track from set_a, identify all steps in all tracks from set_b that meet all cost function criteria Args: set_a: List of STObjects set_b: List of STObjects Returns: track_pairings: pandas.DataFrame ### Response: def match(self, set_a, set_b): """ For each step in each track from set_a, identify all steps in all tracks from set_b that meet all cost function criteria Args: set_a: List of STObjects set_b: List of STObjects Returns: track_pairings: pandas.DataFrame """ track_step_matches = [[] * len(set_a)] costs = self.cost_matrix(set_a, set_b) valid_costs = np.all(costs < 1, axis=2) set_a_matches, set_b_matches = np.where(valid_costs) s = 0 track_pairings = pd.DataFrame(index=np.arange(costs.shape[0]), columns=["Track", "Step", "Time", "Matched", "Pairings"], dtype=object) set_b_info = [] for trb, track_b in enumerate(set_b): for t, time in enumerate(track_b.times): set_b_info.append((trb, t)) set_b_info_arr = np.array(set_b_info, dtype=int) for tr, track_a in enumerate(set_a): for t, time in enumerate(track_a.times): track_pairings.loc[s, ["Track", "Step", "Time"]] = [tr, t, time] track_pairings.loc[s, "Matched"] = 1 if np.count_nonzero(set_a_matches == s) > 0 else 0 if track_pairings.loc[s, "Matched"] == 1: track_pairings.loc[s, "Pairings"] = set_b_info_arr[set_b_matches[set_a_matches == s]] else: track_pairings.loc[s, "Pairings"] = np.array([]) s += 1 return track_pairings
def discard_config(self): """Discard changes (rollback 0).""" self.device.cu.rollback(rb_id=0) if not self.config_lock: self._unlock()
Discard changes (rollback 0).
Below is the instruction that describes the task: ### Input: Discard changes (rollback 0). ### Response: def discard_config(self): """Discard changes (rollback 0).""" self.device.cu.rollback(rb_id=0) if not self.config_lock: self._unlock()
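A typical workflow around discard_config in a NAPALM driver: stage a candidate, inspect the diff, then drop it. A minimal sketch assuming reachable junos credentials:

from napalm import get_network_driver

driver = get_network_driver('junos')
device = driver(hostname='192.0.2.1', username='admin', password='secret')
device.open()
device.load_merge_candidate(config='set system host-name lab-router')
print(device.compare_config())  # show the staged diff
device.discard_config()         # rollback 0 drops the candidate
device.close()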
def fetch(self, start=None, stop=None): """ Fetch log records and return them as a list. |Args| * ``start`` (**int**): non-negative index of the first log record to return. * ``stop`` (**int**): non-negative index of the last log record to return. |Returns| * **list**: list of log records (see ``logger`` module for definition of log record). |Raises| * **None** """ # Set defaults if no explicit indices were provided. if not start: start = 0 if not stop: stop = len(self.log) # Sanity check: indices must be valid. if start < 0: start = 0 if stop > len(self.log): stop = len(self.log) # Clear the fetch flag. It will be set again in the emit() # method once new data arrives. self.waitForFetch = False # Return the specified range of log records. return self.log[start:stop]
Fetch log records and return them as a list. |Args| * ``start`` (**int**): non-negative index of the first log record to return. * ``stop`` (**int**): non-negative index of the last log record to return. |Returns| * **list**: list of log records (see ``logger`` module for definition of log record). |Raises| * **None**
Below is the instruction that describes the task: ### Input: Fetch log records and return them as a list. |Args| * ``start`` (**int**): non-negative index of the first log record to return. * ``stop`` (**int**): non-negative index of the last log record to return. |Returns| * **list**: list of log records (see ``logger`` module for definition of log record). |Raises| * **None** ### Response: def fetch(self, start=None, stop=None): """ Fetch log records and return them as a list. |Args| * ``start`` (**int**): non-negative index of the first log record to return. * ``stop`` (**int**): non-negative index of the last log record to return. |Returns| * **list**: list of log records (see ``logger`` module for definition of log record). |Raises| * **None** """ # Set defaults if no explicit indices were provided. if not start: start = 0 if not stop: stop = len(self.log) # Sanity check: indices must be valid. if start < 0: start = 0 if stop > len(self.log): stop = len(self.log) # Clear the fetch flag. It will be set again in the emit() # method once new data arrives. self.waitForFetch = False # Return the specified range of log records. return self.log[start:stop]
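A small self-contained trace of the clamped-slice behaviour; the owning handler class is not shown, so a bare stand-in with a log list is used and the function above is reattached as a method purely for illustration:

class _DemoHandler:
    def __init__(self):
        self.log = ['rec%d' % i for i in range(5)]
        self.waitForFetch = True
    fetch = fetch  # reuse the function above as a method

h = _DemoHandler()
print(h.fetch(1, 3))    # ['rec1', 'rec2']
print(h.fetch(-4, 99))  # out-of-range indices are clamped -> all five records
print(h.fetch())        # defaults -> the entire log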
def publish_scene_remove(self, scene_id): """publish the removal of a scene""" self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_remove(self.sequence_number, scene_id)) return self.sequence_number
publish the removal of a scene
Below is the instruction that describes the task: ### Input: publish the removal of a scene ### Response: def publish_scene_remove(self, scene_id): """publish the removal of a scene""" self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_remove(self.sequence_number, scene_id)) return self.sequence_number
def add(self, *value): '''convert value and add to self.value Subclass must overwrite this method. Subclass are responsible of creating whatever single instance it need from its ``add(*value)`` and call ``_add()`` to add them to ``self.value`` Args: *value: the value to be added ''' flattenedValueList = list(flatten(value)) return self._add(flattenedValueList, self.value)
convert value and add to self.value Subclass must overwrite this method. Subclass are responsible of creating whatever single instance it need from its ``add(*value)`` and call ``_add()`` to add them to ``self.value`` Args: *value: the value to be added
Below is the instruction that describes the task: ### Input: convert value and add to self.value Subclass must overwrite this method. Subclass are responsible of creating whatever single instance it need from its ``add(*value)`` and call ``_add()`` to add them to ``self.value`` Args: *value: the value to be added ### Response: def add(self, *value): '''convert value and add to self.value Subclass must overwrite this method. Subclass are responsible of creating whatever single instance it need from its ``add(*value)`` and call ``_add()`` to add them to ``self.value`` Args: *value: the value to be added ''' flattenedValueList = list(flatten(value)) return self._add(flattenedValueList, self.value)
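A hypothetical subclass sketch showing the contract the docstring describes: convert the flattened inputs yourself, then hand them to _add (the base-class name and the flatten helper are assumptions carried over from the snippet):

class IntValues(ValueList):  # hypothetical base class owning add() above
    def add(self, *value):
        ints = [int(v) for v in flatten(value)]  # subclass-specific conversion
        return self._add(ints, self.value)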
def backup(self): """Backups files with the same name of the instance filename""" count = 0 name = "{}.bkp".format(self.filename) backup = os.path.join(self.cwd, name) while os.path.exists(backup): count += 1 name = "{}.bkp{}".format(self.filename, count) backup = os.path.join(self.cwd, name) self.hey("Moving existing {} to {}".format(self.filename, name)) os.rename(os.path.join(self.cwd, self.filename), backup)
Backups files with the same name of the instance filename
Below is the instruction that describes the task: ### Input: Backups files with the same name of the instance filename ### Response: def backup(self): """Backups files with the same name of the instance filename""" count = 0 name = "{}.bkp".format(self.filename) backup = os.path.join(self.cwd, name) while os.path.exists(backup): count += 1 name = "{}.bkp{}".format(self.filename, count) backup = os.path.join(self.cwd, name) self.hey("Moving existing {} to {}".format(self.filename, name)) os.rename(os.path.join(self.cwd, self.filename), backup)
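The loop yields .bkp, then .bkp1, .bkp2, and so on. A self-contained trace (the surrounding class with cwd, filename and hey is not shown, so a minimal stand-in reattaches the method for illustration):

import os, tempfile

class _FileHolder:
    def __init__(self, cwd, filename):
        self.cwd, self.filename = cwd, filename
    def hey(self, msg):
        print(msg)
    backup = backup  # reuse the function above as a method

cwd = tempfile.mkdtemp()
for _ in range(3):
    open(os.path.join(cwd, 'report.txt'), 'w').close()
    _FileHolder(cwd, 'report.txt').backup()
print(sorted(os.listdir(cwd)))
# ['report.txt.bkp', 'report.txt.bkp1', 'report.txt.bkp2']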