Dataset columns: code (string, lengths 75 to 104k), docstring (string, lengths 1 to 46.9k), text (string, lengths 164 to 112k)
def rollforward(self, dt): """ Roll provided date forward to next offset only if not on offset. """ if not self.onOffset(dt): if self.n >= 0: return self._next_opening_time(dt) else: return self._prev_opening_time(dt) return dt
Roll provided date forward to next offset only if not on offset.
Below is the the instruction that describes the task: ### Input: Roll provided date forward to next offset only if not on offset. ### Response: def rollforward(self, dt): """ Roll provided date forward to next offset only if not on offset. """ if not self.onOffset(dt): if self.n >= 0: return self._next_opening_time(dt) else: return self._prev_opening_time(dt) return dt
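This `rollforward` has the shape of pandas' business-hour offset. Assuming that is the class it belongs to, a minimal usage sketch (the timestamps and the default 09:00-17:00 business hours are illustrative assumptions):

import pandas as pd

bh = pd.offsets.BusinessHour()                 # default business hours 09:00-17:00

on_offset = pd.Timestamp("2021-06-04 10:00")   # Friday, inside business hours
off_offset = pd.Timestamp("2021-06-05 10:00")  # Saturday, not on offset

print(bh.rollforward(on_offset))    # unchanged: 2021-06-04 10:00:00
print(bh.rollforward(off_offset))   # rolled to the next opening time: 2021-06-07 09:00:00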
def dump(o, f): """Writes out dict as toml to a file Args: o: Object to dump into toml f: File descriptor where the toml should be stored Returns: String containing the toml corresponding to dictionary Raises: TypeError: When anything other than file descriptor is passed """ if not f.write: raise TypeError("You can only dump an object to a file descriptor") d = dumps(o) f.write(d) return d
Writes out dict as toml to a file Args: o: Object to dump into toml f: File descriptor where the toml should be stored Returns: String containing the toml corresponding to dictionary Raises: TypeError: When anything other than file descriptor is passed
Below is the the instruction that describes the task: ### Input: Writes out dict as toml to a file Args: o: Object to dump into toml f: File descriptor where the toml should be stored Returns: String containing the toml corresponding to dictionary Raises: TypeError: When anything other than file descriptor is passed ### Response: def dump(o, f): """Writes out dict as toml to a file Args: o: Object to dump into toml f: File descriptor where the toml should be stored Returns: String containing the toml corresponding to dictionary Raises: TypeError: When anything other than file descriptor is passed """ if not f.write: raise TypeError("You can only dump an object to a file descriptor") d = dumps(o) f.write(d) return d
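The `dump` above only requires that `f` exposes a `write` attribute and delegates serialization to `dumps`. Assuming this is the `dump` from the standard `toml` package (which ships exactly this pair), usage looks like:

import toml

config = {"server": {"host": "127.0.0.1", "ports": [8001, 8002]}}

# writes TOML to the open file descriptor and also returns the TOML string
with open("config.toml", "w") as f:
    toml_text = toml.dump(config, f)

print(toml_text)   # same text that was written to config.toml

The return value is convenient when the same TOML text also needs to be logged or sent elsewhere.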
def check_file( state, fname, missing_msg="Did you create a file named `{}`?", is_dir_msg="Want to check a file named `{}`, but found a directory.", parse=True, use_fs=True, use_solution=False, ): """Test whether file exists, and make its contents the student code. Note: this SCT fails if the file is a directory. """ if use_fs: p = Path(fname) if not p.exists(): state.report(Feedback(missing_msg.format(fname))) # test file exists if p.is_dir(): state.report(Feedback(is_dir_msg.format(fname))) # test its not a dir code = p.read_text() else: code = _get_fname(state, "student_code", fname) if code is None: state.report(Feedback(missing_msg.format(fname))) # test file exists sol_kwargs = {"solution_code": None, "solution_ast": None} if use_solution: sol_code = _get_fname(state, "solution_code", fname) if sol_code is None: raise Exception("Solution code does not have file named: %s" % fname) sol_kwargs["solution_code"] = sol_code sol_kwargs["solution_ast"] = ( state.parse(sol_code, test=False) if parse else None ) return state.to_child( student_code=code, student_ast=state.parse(code) if parse else None, fname=fname, **sol_kwargs )
Test whether file exists, and make its contents the student code. Note: this SCT fails if the file is a directory.
Below is the the instruction that describes the task: ### Input: Test whether file exists, and make its contents the student code. Note: this SCT fails if the file is a directory. ### Response: def check_file( state, fname, missing_msg="Did you create a file named `{}`?", is_dir_msg="Want to check a file named `{}`, but found a directory.", parse=True, use_fs=True, use_solution=False, ): """Test whether file exists, and make its contents the student code. Note: this SCT fails if the file is a directory. """ if use_fs: p = Path(fname) if not p.exists(): state.report(Feedback(missing_msg.format(fname))) # test file exists if p.is_dir(): state.report(Feedback(is_dir_msg.format(fname))) # test its not a dir code = p.read_text() else: code = _get_fname(state, "student_code", fname) if code is None: state.report(Feedback(missing_msg.format(fname))) # test file exists sol_kwargs = {"solution_code": None, "solution_ast": None} if use_solution: sol_code = _get_fname(state, "solution_code", fname) if sol_code is None: raise Exception("Solution code does not have file named: %s" % fname) sol_kwargs["solution_code"] = sol_code sol_kwargs["solution_ast"] = ( state.parse(sol_code, test=False) if parse else None ) return state.to_child( student_code=code, student_ast=state.parse(code) if parse else None, fname=fname, **sol_kwargs )
def _set_sport(self, v, load=False): """ Setter method for sport, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended/seq/sport (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_sport is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sport() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'lt': {'value': 3}, u'gt': {'value': 2}, u'eq': {'value': 1}, u'range': {'value': 5}, u'neq': {'value': 4}},), is_leaf=True, yang_name="sport", rest_name="sport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-command': None, u'cli-drop-node-name': None, u'cli-suppress-no': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='enumeration', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """sport must be of a type compatible with enumeration""", 'defined-type': "brocade-ipv6-access-list:enumeration", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'lt': {'value': 3}, u'gt': {'value': 2}, u'eq': {'value': 1}, u'range': {'value': 5}, u'neq': {'value': 4}},), is_leaf=True, yang_name="sport", rest_name="sport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-command': None, u'cli-drop-node-name': None, u'cli-suppress-no': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='enumeration', is_config=True)""", }) self.__sport = t if hasattr(self, '_set'): self._set()
Setter method for sport, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended/seq/sport (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_sport is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sport() directly.
Below is the the instruction that describes the task: ### Input: Setter method for sport, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended/seq/sport (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_sport is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sport() directly. ### Response: def _set_sport(self, v, load=False): """ Setter method for sport, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended/seq/sport (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_sport is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sport() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'lt': {'value': 3}, u'gt': {'value': 2}, u'eq': {'value': 1}, u'range': {'value': 5}, u'neq': {'value': 4}},), is_leaf=True, yang_name="sport", rest_name="sport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-command': None, u'cli-drop-node-name': None, u'cli-suppress-no': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='enumeration', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """sport must be of a type compatible with enumeration""", 'defined-type': "brocade-ipv6-access-list:enumeration", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'lt': {'value': 3}, u'gt': {'value': 2}, u'eq': {'value': 1}, u'range': {'value': 5}, u'neq': {'value': 4}},), is_leaf=True, yang_name="sport", rest_name="sport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-command': None, u'cli-drop-node-name': None, u'cli-suppress-no': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='enumeration', is_config=True)""", }) self.__sport = t if hasattr(self, '_set'): self._set()
def get_obsmeta(self, lcid): """Get the observation metadata for the given id. This is table 3 of Sesar 2010 """ if self._obsdata is None: self._obsdata = fetch_rrlyrae_fitdata() i = np.where(self._obsdata['id'] == lcid)[0] if len(i) == 0: raise ValueError("invalid lcid: {0}".format(lcid)) return self._obsdata[i[0]]
Get the observation metadata for the given id. This is table 3 of Sesar 2010
Below is the the instruction that describes the task: ### Input: Get the observation metadata for the given id. This is table 3 of Sesar 2010 ### Response: def get_obsmeta(self, lcid): """Get the observation metadata for the given id. This is table 3 of Sesar 2010 """ if self._obsdata is None: self._obsdata = fetch_rrlyrae_fitdata() i = np.where(self._obsdata['id'] == lcid)[0] if len(i) == 0: raise ValueError("invalid lcid: {0}".format(lcid)) return self._obsdata[i[0]]
def _growing_step_sequence(interval_growth, max_interval, init_interval, start_level=None): """ Returns an iterator that constructs a sequence of trigger levels with growing intervals. The interval is growing exponentially until it reaches the maximum value. Then the interval stays the same and the sequence becomes linear. An optional starting level `start_level` defaults to the initial interval. The interval starts out as `init_interval`, multiplied by `interval_growth` in each step until it reaches the `max_interval`. """ interval = init_interval next_level = start_level or init_interval while True: yield next_level interval = min(interval * interval_growth, max_interval) next_level += interval
Returns an iterator that constructs a sequence of trigger levels with growing intervals. The interval is growing exponentially until it reaches the maximum value. Then the interval stays the same and the sequence becomes linear. An optional starting level `start_level` defaults to the initial interval. The interval starts out as `init_interval`, multiplied by `interval_growth` in each step until it reaches the `max_interval`.
Below is the the instruction that describes the task: ### Input: Returns an iterator that constructs a sequence of trigger levels with growing intervals. The interval is growing exponentially until it reaches the maximum value. Then the interval stays the same and the sequence becomes linear. An optional starting level `start_level` defaults to the initial interval. The interval starts out as `init_interval`, multiplied by `interval_growth` in each step until it reaches the `max_interval`. ### Response: def _growing_step_sequence(interval_growth, max_interval, init_interval, start_level=None): """ Returns an iterator that constructs a sequence of trigger levels with growing intervals. The interval is growing exponentially until it reaches the maximum value. Then the interval stays the same and the sequence becomes linear. An optional starting level `start_level` defaults to the initial interval. The interval starts out as `init_interval`, multiplied by `interval_growth` in each step until it reaches the `max_interval`. """ interval = init_interval next_level = start_level or init_interval while True: yield next_level interval = min(interval * interval_growth, max_interval) next_level += interval
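To see what `_growing_step_sequence` actually emits, the sketch below copies the generator (so it runs on its own) and prints the first few trigger levels: the gaps grow geometrically until they reach `max_interval`, after which the levels climb linearly. The parameter values are arbitrary.

from itertools import islice

def growing_step_sequence(interval_growth, max_interval, init_interval, start_level=None):
    # same logic as the generator above, copied so the example is self-contained
    interval = init_interval
    next_level = start_level or init_interval
    while True:
        yield next_level
        interval = min(interval * interval_growth, max_interval)
        next_level += interval

print(list(islice(growing_step_sequence(2, 8, 1), 8)))
# [1, 3, 7, 15, 23, 31, 39, 47] -- intervals 2, 4, 8 grow geometrically, then stay at 8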
def convert(model, input_shape, class_labels=None, mode=None, preprocessor_args=None, builder=None, verbose=True): """Convert an MXNet model to the protobuf spec. Parameters ---------- model: MXNet model A trained MXNet neural network model. input_shape: list of tuples A list of (name, shape) tuples, defining the input names and their shapes. The list also serves to define the desired order of the inputs. class_labels: A string or list of strings. As a string it represents the name of the file which contains the classification labels (one per line). As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier. mode: str ('classifier', 'regressor' or None) Mode of the converted coreml model. When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed. When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed. builder: `NeuralNetworkBuilder` If `None`, a builder will be created internally. This also means the builder will not be finalized and returned as an `MLModel`. Post-processing arguments will be ignored and class labels will not be integrated. This option is meant for advanced users. verbose: bool Print exported layers. **kwargs : Provide keyword arguments for: - input shapes. Supplied as a dictionary object with keyword "input_shape". - pre-processing arguments: Supplied as a dictionary object with keyword "preprocessor_args". The parameters in the dictionary tell the converted coreml model how to pre-process any input before an inference is run on it. For the list of pre-processing arguments see http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters Returns ------- model: A coreml model. """ if not isinstance(input_shape, list): raise TypeError("Must provide a list for input shape. 
e.g input_shape=[('data', (3,224,224))]") def remove_batch(dim): return dim[1:] input_names, input_dims = zip(*input_shape) input_dims = list(map(remove_batch, input_dims)) net = model.symbol # Infer shapes and store in a dictionary shapes = net.infer_shape(**dict(input_shape)) arg_names = net.list_arguments() output_names = net.list_outputs() aux_names = net.list_auxiliary_states() shape_dict = {} for idx, op in enumerate(arg_names): shape_dict[op] = shapes[0][idx] for idx, op in enumerate(output_names): shape_dict[op] = shapes[1][idx] for idx, op in enumerate(aux_names): shape_dict[op] = shapes[2][idx] # Get the inputs and outputs output_dims = shapes[1] if mode is None: output_dims = list(map(remove_batch, output_dims)) input_types = [_datatypes.Array(*dim) for dim in input_dims] output_types = [_datatypes.Array(*dim) for dim in output_dims] # Make the builder input_features = list(zip(input_names, input_types)) output_features = list(zip(output_names, output_types)) finalize = builder is None if builder is None: builder = _neural_network.NeuralNetworkBuilder(input_features, output_features, mode) # Get out the layers net = _json.loads(net.tojson()) nodes = net['nodes'] for i, node in enumerate(nodes): node['id'] = i if node['name'] in shape_dict: node['shape'] = shape_dict[node['name']] node['outputs'] = [] if 'inputs' in node: for ip in node['inputs']: nodes[ip[0]]['outputs'].append([i, 0]) else: node['inputs'] = [] # Mark the head nodes for head in net['heads']: head_id = head[0] head_node = nodes[head_id] head_node['outputs'] = [head] head_node['name'] += "_output" head_node['shape'] = shape_dict[head_node['name']] # For skipped layers, make sure nodes are modified for node in nodes: op = node['op'] inputs = node['inputs'] outputs = node['outputs'] if op in _MXNET_SKIP_LAYERS: nodes[inputs[0][0]]['outputs'][0] = outputs[0] nodes[outputs[0][0]]['inputs'][0] = inputs[0] # Find the input and output names for this node for idx, node in enumerate(nodes): op = node['op'] if op == 'null' or op in _MXNET_SKIP_LAYERS: continue name = node['name'] if verbose: print("%d : %s, %s" % (idx, name, op)) converter_func = _get_layer_converter_fn(op) converter_func(net, node, model, builder) # Only finalize builder if it was created internally. Otherwise, leave it # up to the user. if finalize: # Set the right inputs and outputs _set_input_output_layers(builder, input_names, output_names) builder.set_input(input_names, input_dims) builder.set_output(output_names, output_dims) if preprocessor_args is not None: builder.set_pre_processing_parameters(**preprocessor_args) if class_labels is not None: if type(class_labels) is str: labels = [l.strip() for l in open(class_labels).readlines()] elif type(class_labels) is list: labels = class_labels else: raise TypeError("synset variable of unknown type. Type found: %s. Expected either string or list of strings." % type(class_labels)) builder.set_class_labels(class_labels = labels) # Return the model return _coremltools.models.MLModel(builder.spec)
Convert an MXNet model to the protobuf spec. Parameters ---------- model: MXNet model A trained MXNet neural network model. input_shape: list of tuples A list of (name, shape) tuples, defining the input names and their shapes. The list also serves to define the desired order of the inputs. class_labels: A string or list of strings. As a string it represents the name of the file which contains the classification labels (one per line). As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier. mode: str ('classifier', 'regressor' or None) Mode of the converted coreml model. When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed. When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed. builder: `NeuralNetworkBuilder` If `None`, a builder will be created internally. This also means the builder will not be finalized and returned as an `MLModel`. Post-processing arguments will be ignored and class labels will not be integrated. This option is meant for advanced users. verbose: bool Print exported layers. **kwargs : Provide keyword arguments for: - input shapes. Supplied as a dictionary object with keyword "input_shape". - pre-processing arguments: Supplied as a dictionary object with keyword "preprocessor_args". The parameters in the dictionary tell the converted coreml model how to pre-process any input before an inference is run on it. For the list of pre-processing arguments see http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters Returns ------- model: A coreml model.
Below is the the instruction that describes the task: ### Input: Convert an MXNet model to the protobuf spec. Parameters ---------- model: MXNet model A trained MXNet neural network model. input_shape: list of tuples A list of (name, shape) tuples, defining the input names and their shapes. The list also serves to define the desired order of the inputs. class_labels: A string or list of strings. As a string it represents the name of the file which contains the classification labels (one per line). As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier. mode: str ('classifier', 'regressor' or None) Mode of the converted coreml model. When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed. When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed. builder: `NeuralNetworkBuilder` If `None`, a builder will be created internally. This also means the builder will not be finalized and returned as an `MLModel`. Post-processing arguments will be ignored and class labels will not be integrated. This option is meant for advanced users. verbose: bool Print exported layers. **kwargs : Provide keyword arguments for: - input shapes. Supplied as a dictionary object with keyword "input_shape". - pre-processing arguments: Supplied as a dictionary object with keyword "preprocessor_args". The parameters in the dictionary tell the converted coreml model how to pre-process any input before an inference is run on it. For the list of pre-processing arguments see http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters Returns ------- model: A coreml model. ### Response: def convert(model, input_shape, class_labels=None, mode=None, preprocessor_args=None, builder=None, verbose=True): """Convert an MXNet model to the protobuf spec. Parameters ---------- model: MXNet model A trained MXNet neural network model. input_shape: list of tuples A list of (name, shape) tuples, defining the input names and their shapes. The list also serves to define the desired order of the inputs. class_labels: A string or list of strings. As a string it represents the name of the file which contains the classification labels (one per line). As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier. mode: str ('classifier', 'regressor' or None) Mode of the converted coreml model. When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed. When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed. builder: `NeuralNetworkBuilder` If `None`, a builder will be created internally. This also means the builder will not be finalized and returned as an `MLModel`. Post-processing arguments will be ignored and class labels will not be integrated. This option is meant for advanced users. verbose: bool Print exported layers. **kwargs : Provide keyword arguments for: - input shapes. Supplied as a dictionary object with keyword "input_shape". - pre-processing arguments: Supplied as a dictionary object with keyword "preprocessor_args". The parameters in the dictionary tell the converted coreml model how to pre-process any input before an inference is run on it. 
For the list of pre-processing arguments see http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters Returns ------- model: A coreml model. """ if not isinstance(input_shape, list): raise TypeError("Must provide a list for input shape. e.g input_shape=[('data', (3,224,224))]") def remove_batch(dim): return dim[1:] input_names, input_dims = zip(*input_shape) input_dims = list(map(remove_batch, input_dims)) net = model.symbol # Infer shapes and store in a dictionary shapes = net.infer_shape(**dict(input_shape)) arg_names = net.list_arguments() output_names = net.list_outputs() aux_names = net.list_auxiliary_states() shape_dict = {} for idx, op in enumerate(arg_names): shape_dict[op] = shapes[0][idx] for idx, op in enumerate(output_names): shape_dict[op] = shapes[1][idx] for idx, op in enumerate(aux_names): shape_dict[op] = shapes[2][idx] # Get the inputs and outputs output_dims = shapes[1] if mode is None: output_dims = list(map(remove_batch, output_dims)) input_types = [_datatypes.Array(*dim) for dim in input_dims] output_types = [_datatypes.Array(*dim) for dim in output_dims] # Make the builder input_features = list(zip(input_names, input_types)) output_features = list(zip(output_names, output_types)) finalize = builder is None if builder is None: builder = _neural_network.NeuralNetworkBuilder(input_features, output_features, mode) # Get out the layers net = _json.loads(net.tojson()) nodes = net['nodes'] for i, node in enumerate(nodes): node['id'] = i if node['name'] in shape_dict: node['shape'] = shape_dict[node['name']] node['outputs'] = [] if 'inputs' in node: for ip in node['inputs']: nodes[ip[0]]['outputs'].append([i, 0]) else: node['inputs'] = [] # Mark the head nodes for head in net['heads']: head_id = head[0] head_node = nodes[head_id] head_node['outputs'] = [head] head_node['name'] += "_output" head_node['shape'] = shape_dict[head_node['name']] # For skipped layers, make sure nodes are modified for node in nodes: op = node['op'] inputs = node['inputs'] outputs = node['outputs'] if op in _MXNET_SKIP_LAYERS: nodes[inputs[0][0]]['outputs'][0] = outputs[0] nodes[outputs[0][0]]['inputs'][0] = inputs[0] # Find the input and output names for this node for idx, node in enumerate(nodes): op = node['op'] if op == 'null' or op in _MXNET_SKIP_LAYERS: continue name = node['name'] if verbose: print("%d : %s, %s" % (idx, name, op)) converter_func = _get_layer_converter_fn(op) converter_func(net, node, model, builder) # Only finalize builder if it was created internally. Otherwise, leave it # up to the user. if finalize: # Set the right inputs and outputs _set_input_output_layers(builder, input_names, output_names) builder.set_input(input_names, input_dims) builder.set_output(output_names, output_dims) if preprocessor_args is not None: builder.set_pre_processing_parameters(**preprocessor_args) if class_labels is not None: if type(class_labels) is str: labels = [l.strip() for l in open(class_labels).readlines()] elif type(class_labels) is list: labels = class_labels else: raise TypeError("synset variable of unknown type. Type found: %s. Expected either string or list of strings." % type(class_labels)) builder.set_class_labels(class_labels = labels) # Return the model return _coremltools.models.MLModel(builder.spec)
def convert_models_for_lang(language): """Convert old SingleByteCharSetModels for the given language""" # Validate language language = language.title() lang_metadata = LANGUAGES.get(language) if not lang_metadata: raise ValueError('Unknown language: {}. If you are adding a model for a' ' new language, you must first update metadata/' 'languages.py'.format(language)) lang_mod_name = 'lang{}model'.format(language.lower()) if not os.path.exists(os.path.join('chardet', lang_mod_name + '.py')): print('Skipping {} because it does not have an old model.' .format(language)) return lang_mod = getattr(chardet, lang_mod_name) print('\n{}\n----------------------------------------------------------------' .format(language)) print('Keep ASCII Letters: {}'.format(lang_metadata.use_ascii)) print('Alphabet: {}'.format(lang_metadata.alphabet)) # Create char-to-order maps (aka char-to-rank dicts) charset_models = {} char_ranks = {} order_to_chars = {} for var_name in dir(lang_mod): if not ('Model' in var_name and 'LangModel' not in var_name): continue old_model = getattr(lang_mod, var_name) charset_name = old_model['charset_name'] print('Converting charset model for {}'.format(charset_name)) sys.stdout.flush() charset_models[charset_name] = convert_sbcs_model(old_model, lang_metadata.alphabet) # Since we don't know which charsets have which characters, we have to # try to reconstruct char_ranks (for letters only, since that's all # the old language models contain) for byte_hex, order in iteritems(charset_models[charset_name].char_to_order_map): # order 64 was basically ignored before because of the off by one # error, but it's hard to know if training took that into account if order > 64: continue # Convert to bytes in Python 2 and 3 char = bytes(bytearray((byte_hex,))) try: unicode_char = char.decode(charset_name) except UnicodeDecodeError: continue if unicode_char not in char_ranks: char_ranks[unicode_char] = order order_to_chars[order] = unicode_char elif char_ranks[unicode_char] != order: raise ValueError('Unstable character ranking for {}'.format(unicode_char)) old_lang_model = getattr(lang_mod, '{}LangModel'.format(language.title())) language_model = {} # Preserve off-by-one error here by ignoring first column and row for i in range(1, 64): if i not in order_to_chars: continue lang_char = order_to_chars[i] language_model[lang_char] = {} for j in range(1, 64): if j not in order_to_chars: continue lang_char2 = order_to_chars[j] language_model[lang_char][lang_char2] = old_lang_model[(i * 64) + j] # Write output files print('Writing output file for {}\n\n'.format(language)) sys.stdout.flush() with open('lang{}model.py'.format(language.lower()), 'w') as output_file: upper_lang = language.upper() # print header to set encoding print('#!/usr/bin/env python\n' '# -*- coding: utf-8 -*-\n\n' 'from chardet.sbcharsetprober import SingleByteCharSetModel\n\n', file=output_file) lm_name = '{}_LANG_MODEL'.format(upper_lang) print_language_model(lm_name, language_model, output_file, char_ranks) print('# 255: Undefined characters that did not exist in training text\n' '# 254: Carriage/Return\n' '# 253: symbol (punctuation) that does not belong to word\n' '# 252: 0 - 9\n' '# 251: Control characters\n\n' '# Character Mapping Table(s):', file=output_file) for charset_name, sbcs_model in iteritems(charset_models): normal_name = normalize_name(charset_name) char_to_order_name = ('{}_{}_CHAR_TO_ORDER'.format(normal_name, upper_lang)) print_char_to_order(char_to_order_name, sbcs_model.char_to_order_map, charset_name, 
output_file) sbcs_model_name = '{}_{}_MODEL'.format(normal_name, upper_lang) sbcs_model.char_to_order_map.clear() sbcs_model_repr = (repr(sbcs_model) .replace('None', lm_name) .replace('{}', char_to_order_name) .replace(', ', (',\n' + ' ' * (len(sbcs_model_name) + 26)))) print('{} = {}\n'.format(sbcs_model_name, sbcs_model_repr), file=output_file)
Convert old SingleByteCharSetModels for the given language
Below is the the instruction that describes the task: ### Input: Convert old SingleByteCharSetModels for the given language ### Response: def convert_models_for_lang(language): """Convert old SingleByteCharSetModels for the given language""" # Validate language language = language.title() lang_metadata = LANGUAGES.get(language) if not lang_metadata: raise ValueError('Unknown language: {}. If you are adding a model for a' ' new language, you must first update metadata/' 'languages.py'.format(language)) lang_mod_name = 'lang{}model'.format(language.lower()) if not os.path.exists(os.path.join('chardet', lang_mod_name + '.py')): print('Skipping {} because it does not have an old model.' .format(language)) return lang_mod = getattr(chardet, lang_mod_name) print('\n{}\n----------------------------------------------------------------' .format(language)) print('Keep ASCII Letters: {}'.format(lang_metadata.use_ascii)) print('Alphabet: {}'.format(lang_metadata.alphabet)) # Create char-to-order maps (aka char-to-rank dicts) charset_models = {} char_ranks = {} order_to_chars = {} for var_name in dir(lang_mod): if not ('Model' in var_name and 'LangModel' not in var_name): continue old_model = getattr(lang_mod, var_name) charset_name = old_model['charset_name'] print('Converting charset model for {}'.format(charset_name)) sys.stdout.flush() charset_models[charset_name] = convert_sbcs_model(old_model, lang_metadata.alphabet) # Since we don't know which charsets have which characters, we have to # try to reconstruct char_ranks (for letters only, since that's all # the old language models contain) for byte_hex, order in iteritems(charset_models[charset_name].char_to_order_map): # order 64 was basically ignored before because of the off by one # error, but it's hard to know if training took that into account if order > 64: continue # Convert to bytes in Python 2 and 3 char = bytes(bytearray((byte_hex,))) try: unicode_char = char.decode(charset_name) except UnicodeDecodeError: continue if unicode_char not in char_ranks: char_ranks[unicode_char] = order order_to_chars[order] = unicode_char elif char_ranks[unicode_char] != order: raise ValueError('Unstable character ranking for {}'.format(unicode_char)) old_lang_model = getattr(lang_mod, '{}LangModel'.format(language.title())) language_model = {} # Preserve off-by-one error here by ignoring first column and row for i in range(1, 64): if i not in order_to_chars: continue lang_char = order_to_chars[i] language_model[lang_char] = {} for j in range(1, 64): if j not in order_to_chars: continue lang_char2 = order_to_chars[j] language_model[lang_char][lang_char2] = old_lang_model[(i * 64) + j] # Write output files print('Writing output file for {}\n\n'.format(language)) sys.stdout.flush() with open('lang{}model.py'.format(language.lower()), 'w') as output_file: upper_lang = language.upper() # print header to set encoding print('#!/usr/bin/env python\n' '# -*- coding: utf-8 -*-\n\n' 'from chardet.sbcharsetprober import SingleByteCharSetModel\n\n', file=output_file) lm_name = '{}_LANG_MODEL'.format(upper_lang) print_language_model(lm_name, language_model, output_file, char_ranks) print('# 255: Undefined characters that did not exist in training text\n' '# 254: Carriage/Return\n' '# 253: symbol (punctuation) that does not belong to word\n' '# 252: 0 - 9\n' '# 251: Control characters\n\n' '# Character Mapping Table(s):', file=output_file) for charset_name, sbcs_model in iteritems(charset_models): normal_name = normalize_name(charset_name) char_to_order_name = 
('{}_{}_CHAR_TO_ORDER'.format(normal_name, upper_lang)) print_char_to_order(char_to_order_name, sbcs_model.char_to_order_map, charset_name, output_file) sbcs_model_name = '{}_{}_MODEL'.format(normal_name, upper_lang) sbcs_model.char_to_order_map.clear() sbcs_model_repr = (repr(sbcs_model) .replace('None', lm_name) .replace('{}', char_to_order_name) .replace(', ', (',\n' + ' ' * (len(sbcs_model_name) + 26)))) print('{} = {}\n'.format(sbcs_model_name, sbcs_model_repr), file=output_file)
def updateBar(self, bar): """更新K线""" self.count += 1 if not self.inited and self.count >= self.size: self.inited = True self.openArray[0:self.size - 1] = self.openArray[1:self.size] self.highArray[0:self.size - 1] = self.highArray[1:self.size] self.lowArray[0:self.size - 1] = self.lowArray[1:self.size] self.closeArray[0:self.size - 1] = self.closeArray[1:self.size] self.volumeArray[0:self.size - 1] = self.volumeArray[1:self.size] self.openArray[-1] = bar.open self.highArray[-1] = bar.high self.lowArray[-1] = bar.low self.closeArray[-1] = bar.close self.volumeArray[-1] = bar.volume
Update K-line (candlestick bar)
Below is the the instruction that describes the task: ### Input: 更新K线 ### Response: def updateBar(self, bar): """更新K线""" self.count += 1 if not self.inited and self.count >= self.size: self.inited = True self.openArray[0:self.size - 1] = self.openArray[1:self.size] self.highArray[0:self.size - 1] = self.highArray[1:self.size] self.lowArray[0:self.size - 1] = self.lowArray[1:self.size] self.closeArray[0:self.size - 1] = self.closeArray[1:self.size] self.volumeArray[0:self.size - 1] = self.volumeArray[1:self.size] self.openArray[-1] = bar.open self.highArray[-1] = bar.high self.lowArray[-1] = bar.low self.closeArray[-1] = bar.close self.volumeArray[-1] = bar.volume
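`updateBar` maintains fixed-size rolling windows: each array is shifted left by one slot and the newest bar is written into the last position. The NumPy sketch below isolates that pattern with a hypothetical window size and close prices, independent of the original class:

import numpy as np

size = 5
close_array = np.zeros(size)

def update_close(close_array, new_close):
    # shift the window left by one and append the newest close
    close_array[:-1] = close_array[1:]
    close_array[-1] = new_close

for price in [10.0, 10.5, 11.0, 10.8, 11.2, 11.5]:
    update_close(close_array, price)

print(close_array)   # [10.5 11.  10.8 11.2 11.5] -- only the most recent `size` closes are kept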
def update_flowspec_global_table(self, flowspec_family, rules, actions=None, is_withdraw=False): """Update a BGP route in the Global table for Flow Specification. ``flowspec_family`` specifies one of the Flow Specification family name. ``rules`` specifies NLRIs of Flow Specification as a dictionary type value. `` actions`` specifies Traffic Filtering Actions of Flow Specification as a dictionary type value. If `is_withdraw` is False, which is the default, add a BGP route to the Global table. If `is_withdraw` is True, remove a BGP route from the Global table. """ from ryu.services.protocols.bgp.core import BgpCoreError from ryu.services.protocols.bgp.api.prefix import ( FLOWSPEC_FAMILY_IPV4, FLOWSPEC_FAMILY_IPV6, FLOWSPEC_FAMILY_L2VPN, ) src_ver_num = 1 peer = None # set mandatory path attributes origin = BGPPathAttributeOrigin(BGP_ATTR_ORIGIN_IGP) aspath = BGPPathAttributeAsPath([[]]) pathattrs = OrderedDict() pathattrs[BGP_ATTR_TYPE_ORIGIN] = origin pathattrs[BGP_ATTR_TYPE_AS_PATH] = aspath if flowspec_family == FLOWSPEC_FAMILY_IPV4: _nlri = FlowSpecIPv4NLRI.from_user(**rules) p = IPv4FlowSpecPath try: communities = create_v4flowspec_actions(actions) except ValueError as e: raise BgpCoreError(desc=str(e)) if communities: pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( BGPPathAttributeExtendedCommunities( communities=communities)) elif flowspec_family == FLOWSPEC_FAMILY_IPV6: _nlri = FlowSpecIPv6NLRI.from_user(**rules) p = IPv6FlowSpecPath try: communities = create_v6flowspec_actions(actions) except ValueError as e: raise BgpCoreError(desc=str(e)) if communities: pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( BGPPathAttributeExtendedCommunities( communities=communities)) elif flowspec_family == FLOWSPEC_FAMILY_L2VPN: _nlri = FlowSpecL2VPNNLRI.from_user(**rules) p = L2vpnFlowSpecPath try: communities = create_l2vpnflowspec_actions(actions) except ValueError as e: raise BgpCoreError(desc=str(e)) if communities: pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( BGPPathAttributeExtendedCommunities( communities=communities)) else: raise BgpCoreError( desc='Unsupported flowspec family %s' % flowspec_family) new_path = p(peer, _nlri, src_ver_num, pattrs=pathattrs, is_withdraw=is_withdraw) # add to global table and propagates to neighbors self.learn_path(new_path)
Update a BGP route in the Global table for Flow Specification. ``flowspec_family`` specifies one of the Flow Specification family name. ``rules`` specifies NLRIs of Flow Specification as a dictionary type value. `` actions`` specifies Traffic Filtering Actions of Flow Specification as a dictionary type value. If `is_withdraw` is False, which is the default, add a BGP route to the Global table. If `is_withdraw` is True, remove a BGP route from the Global table.
Below is the the instruction that describes the task: ### Input: Update a BGP route in the Global table for Flow Specification. ``flowspec_family`` specifies one of the Flow Specification family name. ``rules`` specifies NLRIs of Flow Specification as a dictionary type value. `` actions`` specifies Traffic Filtering Actions of Flow Specification as a dictionary type value. If `is_withdraw` is False, which is the default, add a BGP route to the Global table. If `is_withdraw` is True, remove a BGP route from the Global table. ### Response: def update_flowspec_global_table(self, flowspec_family, rules, actions=None, is_withdraw=False): """Update a BGP route in the Global table for Flow Specification. ``flowspec_family`` specifies one of the Flow Specification family name. ``rules`` specifies NLRIs of Flow Specification as a dictionary type value. `` actions`` specifies Traffic Filtering Actions of Flow Specification as a dictionary type value. If `is_withdraw` is False, which is the default, add a BGP route to the Global table. If `is_withdraw` is True, remove a BGP route from the Global table. """ from ryu.services.protocols.bgp.core import BgpCoreError from ryu.services.protocols.bgp.api.prefix import ( FLOWSPEC_FAMILY_IPV4, FLOWSPEC_FAMILY_IPV6, FLOWSPEC_FAMILY_L2VPN, ) src_ver_num = 1 peer = None # set mandatory path attributes origin = BGPPathAttributeOrigin(BGP_ATTR_ORIGIN_IGP) aspath = BGPPathAttributeAsPath([[]]) pathattrs = OrderedDict() pathattrs[BGP_ATTR_TYPE_ORIGIN] = origin pathattrs[BGP_ATTR_TYPE_AS_PATH] = aspath if flowspec_family == FLOWSPEC_FAMILY_IPV4: _nlri = FlowSpecIPv4NLRI.from_user(**rules) p = IPv4FlowSpecPath try: communities = create_v4flowspec_actions(actions) except ValueError as e: raise BgpCoreError(desc=str(e)) if communities: pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( BGPPathAttributeExtendedCommunities( communities=communities)) elif flowspec_family == FLOWSPEC_FAMILY_IPV6: _nlri = FlowSpecIPv6NLRI.from_user(**rules) p = IPv6FlowSpecPath try: communities = create_v6flowspec_actions(actions) except ValueError as e: raise BgpCoreError(desc=str(e)) if communities: pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( BGPPathAttributeExtendedCommunities( communities=communities)) elif flowspec_family == FLOWSPEC_FAMILY_L2VPN: _nlri = FlowSpecL2VPNNLRI.from_user(**rules) p = L2vpnFlowSpecPath try: communities = create_l2vpnflowspec_actions(actions) except ValueError as e: raise BgpCoreError(desc=str(e)) if communities: pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( BGPPathAttributeExtendedCommunities( communities=communities)) else: raise BgpCoreError( desc='Unsupported flowspec family %s' % flowspec_family) new_path = p(peer, _nlri, src_ver_num, pattrs=pathattrs, is_withdraw=is_withdraw) # add to global table and propagates to neighbors self.learn_path(new_path)
def save_metadata(self, data_dir, feature_name=None): """See base class for details.""" # Recursively save all child features for feature_key, feature in six.iteritems(self._feature_dict): if feature_name: feature_key = '-'.join((feature_name, feature_key)) feature.save_metadata(data_dir, feature_name=feature_key)
See base class for details.
Below is the the instruction that describes the task: ### Input: See base class for details. ### Response: def save_metadata(self, data_dir, feature_name=None): """See base class for details.""" # Recursively save all child features for feature_key, feature in six.iteritems(self._feature_dict): if feature_name: feature_key = '-'.join((feature_name, feature_key)) feature.save_metadata(data_dir, feature_name=feature_key)
def rex_assert(self, rex, byte=False): """ If `rex` expression is not found then raise `DataNotFound` exception. """ self.rex_search(rex, byte=byte)
If `rex` expression is not found then raise `DataNotFound` exception.
Below is the the instruction that describes the task: ### Input: If `rex` expression is not found then raise `DataNotFound` exception. ### Response: def rex_assert(self, rex, byte=False): """ If `rex` expression is not found then raise `DataNotFound` exception. """ self.rex_search(rex, byte=byte)
def audits(self, ticket=None, include=None, **kwargs): """ Retrieve TicketAudits. If ticket is passed, return the tickets for a specific audit. If ticket_id is None, a TicketAuditGenerator is returned to handle pagination. The way this generator works is a different to the other Zenpy generators as it is cursor based, allowing you to change the direction that you are consuming objects. This is done with the reversed() python method. For example: .. code-block:: python for audit in reversed(zenpy_client.tickets.audits()): print(audit) See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for information on additional parameters. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param ticket: Ticket object or id """ if ticket is not None: return self._query_zendesk(self.endpoint.audits, 'ticket_audit', id=ticket, include=include) else: return self._query_zendesk(self.endpoint.audits.cursor, 'ticket_audit', include=include, **kwargs)
Retrieve TicketAudits. If ticket is passed, return the tickets for a specific audit. If ticket_id is None, a TicketAuditGenerator is returned to handle pagination. The way this generator works is a different to the other Zenpy generators as it is cursor based, allowing you to change the direction that you are consuming objects. This is done with the reversed() python method. For example: .. code-block:: python for audit in reversed(zenpy_client.tickets.audits()): print(audit) See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for information on additional parameters. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param ticket: Ticket object or id
Below is the the instruction that describes the task: ### Input: Retrieve TicketAudits. If ticket is passed, return the tickets for a specific audit. If ticket_id is None, a TicketAuditGenerator is returned to handle pagination. The way this generator works is a different to the other Zenpy generators as it is cursor based, allowing you to change the direction that you are consuming objects. This is done with the reversed() python method. For example: .. code-block:: python for audit in reversed(zenpy_client.tickets.audits()): print(audit) See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for information on additional parameters. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param ticket: Ticket object or id ### Response: def audits(self, ticket=None, include=None, **kwargs): """ Retrieve TicketAudits. If ticket is passed, return the tickets for a specific audit. If ticket_id is None, a TicketAuditGenerator is returned to handle pagination. The way this generator works is a different to the other Zenpy generators as it is cursor based, allowing you to change the direction that you are consuming objects. This is done with the reversed() python method. For example: .. code-block:: python for audit in reversed(zenpy_client.tickets.audits()): print(audit) See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for information on additional parameters. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param ticket: Ticket object or id """ if ticket is not None: return self._query_zendesk(self.endpoint.audits, 'ticket_audit', id=ticket, include=include) else: return self._query_zendesk(self.endpoint.audits.cursor, 'ticket_audit', include=include, **kwargs)
def whereis(self, channel): """ get ocurrences of channel name in the file Parameters ---------- channel : str channel name string Returns ------- ocurrences : tuple Examples -------- >>> mdf = MDF(file_name) >>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file ((1, 2), (2, 4)) >>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file () """ if channel in self: return tuple(self.channels_db[channel]) else: return tuple()
get ocurrences of channel name in the file Parameters ---------- channel : str channel name string Returns ------- ocurrences : tuple Examples -------- >>> mdf = MDF(file_name) >>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file ((1, 2), (2, 4)) >>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file ()
Below is the the instruction that describes the task: ### Input: get ocurrences of channel name in the file Parameters ---------- channel : str channel name string Returns ------- ocurrences : tuple Examples -------- >>> mdf = MDF(file_name) >>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file ((1, 2), (2, 4)) >>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file () ### Response: def whereis(self, channel): """ get ocurrences of channel name in the file Parameters ---------- channel : str channel name string Returns ------- ocurrences : tuple Examples -------- >>> mdf = MDF(file_name) >>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file ((1, 2), (2, 4)) >>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file () """ if channel in self: return tuple(self.channels_db[channel]) else: return tuple()
def configure_logging(level): """ Configure global log level to given one :param level: Level (INFO | DEBUG | WARN | ERROR) :return: """ global logging_level logging_level = logging.ERROR if "info" == level.lower(): logging_level = logging.INFO elif "warn" == level.lower(): logging_level = logging.WARNING elif "debug" == level.lower(): logging_level = logging.DEBUG
Configure global log level to given one :param level: Level (INFO | DEBUG | WARN | ERROR) :return:
Below is the the instruction that describes the task: ### Input: Configure global log level to given one :param level: Level (INFO | DEBUG | WARN | ERROR) :return: ### Response: def configure_logging(level): """ Configure global log level to given one :param level: Level (INFO | DEBUG | WARN | ERROR) :return: """ global logging_level logging_level = logging.ERROR if "info" == level.lower(): logging_level = logging.INFO elif "warn" == level.lower(): logging_level = logging.WARNING elif "debug" == level.lower(): logging_level = logging.DEBUG
def in_memory(self, op_in_mem): r""" If set to True, the output will be stored in memory. """ old_state = self.in_memory if not old_state and op_in_mem: self._map_to_memory() elif not op_in_mem and old_state: self._clear_in_memory()
r""" If set to True, the output will be stored in memory.
Below is the the instruction that describes the task: ### Input: r""" If set to True, the output will be stored in memory. ### Response: def in_memory(self, op_in_mem): r""" If set to True, the output will be stored in memory. """ old_state = self.in_memory if not old_state and op_in_mem: self._map_to_memory() elif not op_in_mem and old_state: self._clear_in_memory()
def all_subclasses(cls): """Recursively returns all the subclasses of the provided class. """ subclasses = cls.__subclasses__() descendants = (descendant for subclass in subclasses for descendant in all_subclasses(subclass)) return set(subclasses) | set(descendants)
Recursively returns all the subclasses of the provided class.
Below is the the instruction that describes the task: ### Input: Recursively returns all the subclasses of the provided class. ### Response: def all_subclasses(cls): """Recursively returns all the subclasses of the provided class. """ subclasses = cls.__subclasses__() descendants = (descendant for subclass in subclasses for descendant in all_subclasses(subclass)) return set(subclasses) | set(descendants)
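A quick self-contained check of `all_subclasses` on a small hypothetical hierarchy (the function is copied so the snippet runs on its own):

def all_subclasses(cls):
    # recursively collect every descendant of cls
    subclasses = cls.__subclasses__()
    descendants = (descendant for subclass in subclasses
                   for descendant in all_subclasses(subclass))
    return set(subclasses) | set(descendants)

class Base: pass
class Child(Base): pass
class GrandChild(Child): pass
class OtherChild(Base): pass

print(all_subclasses(Base))   # the three descendant classes, in no particular order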
def connect(url, username, password): """ Return a connected Bitbucket session """ bb_session = stashy.connect(url, username, password) logger.info('Connected to: %s as %s', url, username) return bb_session
Return a connected Bitbucket session
Below is the the instruction that describes the task: ### Input: Return a connected Bitbucket session ### Response: def connect(url, username, password): """ Return a connected Bitbucket session """ bb_session = stashy.connect(url, username, password) logger.info('Connected to: %s as %s', url, username) return bb_session
def project_with_metadata(self, term_doc_mat, x_dim=0, y_dim=1): ''' Returns a projection of the :param term_doc_mat: a TermDocMatrix :return: CategoryProjection ''' return self._project_category_corpus(self._get_category_metadata_corpus_and_replace_terms(term_doc_mat), x_dim, y_dim)
Returns a projection of the :param term_doc_mat: a TermDocMatrix :return: CategoryProjection
Below is the the instruction that describes the task: ### Input: Returns a projection of the :param term_doc_mat: a TermDocMatrix :return: CategoryProjection ### Response: def project_with_metadata(self, term_doc_mat, x_dim=0, y_dim=1): ''' Returns a projection of the :param term_doc_mat: a TermDocMatrix :return: CategoryProjection ''' return self._project_category_corpus(self._get_category_metadata_corpus_and_replace_terms(term_doc_mat), x_dim, y_dim)
def send_request_email( authorised_text, authorised_role, authorised_persons, application, link, is_secret): """Sends an email to admin asking to approve user application""" context = CONTEXT.copy() context['requester'] = application.applicant context['link'] = link context['is_secret'] = is_secret context['application'] = application context['authorised_text'] = authorised_text _send_request_email( context, authorised_role, authorised_persons, "common_request")
Sends an email to admin asking to approve user application
Below is the the instruction that describes the task: ### Input: Sends an email to admin asking to approve user application ### Response: def send_request_email( authorised_text, authorised_role, authorised_persons, application, link, is_secret): """Sends an email to admin asking to approve user application""" context = CONTEXT.copy() context['requester'] = application.applicant context['link'] = link context['is_secret'] = is_secret context['application'] = application context['authorised_text'] = authorised_text _send_request_email( context, authorised_role, authorised_persons, "common_request")
def gelman_rubin(self, chain=None, threshold=0.05): r""" Runs the Gelman Rubin diagnostic on the supplied chains. Parameters ---------- chain : int|str, optional Which chain to run the diagnostic on. By default, this is `None`, which will run the diagnostic on all chains. You can also supply and integer (the chain index) or a string, for the chain name (if you set one). threshold : float, optional The maximum deviation permitted from 1 for the final value :math:`\hat{R}` Returns ------- float whether or not the chains pass the test Notes ----- I follow PyMC in calculating the Gelman-Rubin statistic, where, having :math:`m` chains of length :math:`n`, we compute .. math:: B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2 W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right] where :math:`\theta` represents each model parameter. We then compute :math:`\hat{V} = \frac{n_1}{n}W + \frac{1}{n}B`, and have our convergence ratio :math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters, this ratio deviates from unity by less than the supplied threshold. """ if chain is None: return np.all([self.gelman_rubin(k, threshold=threshold) for k in range(len(self.parent.chains))]) index = self.parent._get_chain(chain) assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index) chain = self.parent.chains[index[0]] num_walkers = chain.walkers parameters = chain.parameters name = chain.name data = chain.chain chains = np.split(data, num_walkers) assert num_walkers > 1, "Cannot run Gelman-Rubin statistic with only one walker" m = 1.0 * len(chains) n = 1.0 * chains[0].shape[0] all_mean = np.mean(data, axis=0) chain_means = np.array([np.mean(c, axis=0) for c in chains]) chain_var = np.array([np.var(c, axis=0, ddof=1) for c in chains]) b = n / (m - 1) * ((chain_means - all_mean)**2).sum(axis=0) w = (1 / m) * chain_var.sum(axis=0) var = (n - 1) * w / n + b / n v = var + b / (n * m) R = np.sqrt(v / w) passed = np.abs(R - 1) < threshold print("Gelman-Rubin Statistic values for chain %s" % name) for p, v, pas in zip(parameters, R, passed): param = "Param %d" % p if isinstance(p, int) else p print("%s: %7.5f (%s)" % (param, v, "Passed" if pas else "Failed")) return np.all(passed)
r""" Runs the Gelman Rubin diagnostic on the supplied chains. Parameters ---------- chain : int|str, optional Which chain to run the diagnostic on. By default, this is `None`, which will run the diagnostic on all chains. You can also supply and integer (the chain index) or a string, for the chain name (if you set one). threshold : float, optional The maximum deviation permitted from 1 for the final value :math:`\hat{R}` Returns ------- float whether or not the chains pass the test Notes ----- I follow PyMC in calculating the Gelman-Rubin statistic, where, having :math:`m` chains of length :math:`n`, we compute .. math:: B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2 W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right] where :math:`\theta` represents each model parameter. We then compute :math:`\hat{V} = \frac{n_1}{n}W + \frac{1}{n}B`, and have our convergence ratio :math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters, this ratio deviates from unity by less than the supplied threshold.
Below is the the instruction that describes the task: ### Input: r""" Runs the Gelman Rubin diagnostic on the supplied chains. Parameters ---------- chain : int|str, optional Which chain to run the diagnostic on. By default, this is `None`, which will run the diagnostic on all chains. You can also supply and integer (the chain index) or a string, for the chain name (if you set one). threshold : float, optional The maximum deviation permitted from 1 for the final value :math:`\hat{R}` Returns ------- float whether or not the chains pass the test Notes ----- I follow PyMC in calculating the Gelman-Rubin statistic, where, having :math:`m` chains of length :math:`n`, we compute .. math:: B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2 W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right] where :math:`\theta` represents each model parameter. We then compute :math:`\hat{V} = \frac{n_1}{n}W + \frac{1}{n}B`, and have our convergence ratio :math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters, this ratio deviates from unity by less than the supplied threshold. ### Response: def gelman_rubin(self, chain=None, threshold=0.05): r""" Runs the Gelman Rubin diagnostic on the supplied chains. Parameters ---------- chain : int|str, optional Which chain to run the diagnostic on. By default, this is `None`, which will run the diagnostic on all chains. You can also supply and integer (the chain index) or a string, for the chain name (if you set one). threshold : float, optional The maximum deviation permitted from 1 for the final value :math:`\hat{R}` Returns ------- float whether or not the chains pass the test Notes ----- I follow PyMC in calculating the Gelman-Rubin statistic, where, having :math:`m` chains of length :math:`n`, we compute .. math:: B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2 W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right] where :math:`\theta` represents each model parameter. We then compute :math:`\hat{V} = \frac{n_1}{n}W + \frac{1}{n}B`, and have our convergence ratio :math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters, this ratio deviates from unity by less than the supplied threshold. """ if chain is None: return np.all([self.gelman_rubin(k, threshold=threshold) for k in range(len(self.parent.chains))]) index = self.parent._get_chain(chain) assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index) chain = self.parent.chains[index[0]] num_walkers = chain.walkers parameters = chain.parameters name = chain.name data = chain.chain chains = np.split(data, num_walkers) assert num_walkers > 1, "Cannot run Gelman-Rubin statistic with only one walker" m = 1.0 * len(chains) n = 1.0 * chains[0].shape[0] all_mean = np.mean(data, axis=0) chain_means = np.array([np.mean(c, axis=0) for c in chains]) chain_var = np.array([np.var(c, axis=0, ddof=1) for c in chains]) b = n / (m - 1) * ((chain_means - all_mean)**2).sum(axis=0) w = (1 / m) * chain_var.sum(axis=0) var = (n - 1) * w / n + b / n v = var + b / (n * m) R = np.sqrt(v / w) passed = np.abs(R - 1) < threshold print("Gelman-Rubin Statistic values for chain %s" % name) for p, v, pas in zip(parameters, R, passed): param = "Param %d" % p if isinstance(p, int) else p print("%s: %7.5f (%s)" % (param, v, "Passed" if pas else "Failed")) return np.all(passed)
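The statistic in `gelman_rubin` depends only on per-walker means and variances, so the same computation can be reproduced with plain NumPy on synthetic chains. A sketch for a single parameter, following the formulas in the docstring (the chain data here is made up):

import numpy as np

rng = np.random.default_rng(0)
m, n = 4, 1000                                 # m walkers, n samples each
chains = rng.normal(0.0, 1.0, size=(m, n))     # well-mixed synthetic chains for one parameter

chain_means = chains.mean(axis=1)
chain_vars = chains.var(axis=1, ddof=1)
all_mean = chains.mean()

b = n / (m - 1) * ((chain_means - all_mean) ** 2).sum()   # between-chain variance
w = chain_vars.mean()                                     # within-chain variance
var = (n - 1) * w / n + b / n
v = var + b / (n * m)
r_hat = np.sqrt(v / w)

print(abs(r_hat - 1) < 0.05)   # True for well-mixed chains, mirroring the threshold check above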
def relative_root_dir(self): """Build the relative root dir path for the bundle version.""" return Path(self.bundle.name) / str(self.created_at.date())
Build the relative root dir path for the bundle version.
Below is the the instruction that describes the task: ### Input: Build the relative root dir path for the bundle version. ### Response: def relative_root_dir(self): """Build the relative root dir path for the bundle version.""" return Path(self.bundle.name) / str(self.created_at.date())
def solve_apply(expr, vars): """Returns the result of applying function (lhs) to its arguments (rest). We use IApplicative to apply the function, because that gives the host application an opportunity to compare the function being called against a whitelist. EFILTER will never directly call a function that wasn't provided through a protocol implementation. """ func = __solve_for_scalar(expr.func, vars) args = [] kwargs = {} for arg in expr.args: if isinstance(arg, ast.Pair): if not isinstance(arg.lhs, ast.Var): raise errors.EfilterError( root=arg.lhs, message="Invalid argument name.") kwargs[arg.key.value] = solve(arg.value, vars).value else: args.append(solve(arg, vars).value) result = applicative.apply(func, args, kwargs) return Result(result, ())
Returns the result of applying function (lhs) to its arguments (rest). We use IApplicative to apply the function, because that gives the host application an opportunity to compare the function being called against a whitelist. EFILTER will never directly call a function that wasn't provided through a protocol implementation.
Below is the the instruction that describes the task: ### Input: Returns the result of applying function (lhs) to its arguments (rest). We use IApplicative to apply the function, because that gives the host application an opportunity to compare the function being called against a whitelist. EFILTER will never directly call a function that wasn't provided through a protocol implementation. ### Response: def solve_apply(expr, vars): """Returns the result of applying function (lhs) to its arguments (rest). We use IApplicative to apply the function, because that gives the host application an opportunity to compare the function being called against a whitelist. EFILTER will never directly call a function that wasn't provided through a protocol implementation. """ func = __solve_for_scalar(expr.func, vars) args = [] kwargs = {} for arg in expr.args: if isinstance(arg, ast.Pair): if not isinstance(arg.lhs, ast.Var): raise errors.EfilterError( root=arg.lhs, message="Invalid argument name.") kwargs[arg.key.value] = solve(arg.value, vars).value else: args.append(solve(arg, vars).value) result = applicative.apply(func, args, kwargs) return Result(result, ())
def __expire_files(self): """Because files are always unclean""" self.__files = OrderedDict( item for item in self.__files.items() if not item[1].expired )
Because files are always unclean
Below is the the instruction that describes the task: ### Input: Because files are always unclean ### Response: def __expire_files(self): """Because files are always unclean""" self.__files = OrderedDict( item for item in self.__files.items() if not item[1].expired )
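A tiny self-contained illustration of the same expiry sweep, using a hypothetical entry type with an expired property; nothing here comes from the original class.

from collections import OrderedDict

class Entry:
    def __init__(self, ttl):
        self.ttl = ttl

    @property
    def expired(self):
        return self.ttl <= 0

files = OrderedDict([("a", Entry(0)), ("b", Entry(5)), ("c", Entry(-1))])
# keep only entries that have not expired, preserving insertion order
files = OrderedDict(item for item in files.items() if not item[1].expired)
print(list(files))  # ['b']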
def _next_offset(self): """Return the offset of the next line to read.""" if self._filestream: offset = self._filestream.tell() if offset: offset -= 1 else: offset = self._initial_offset return offset
Return the offset of the next line to read.
Below is the the instruction that describes the task: ### Input: Return the offset of the next line to read. ### Response: def _next_offset(self): """Return the offset of the next line to read.""" if self._filestream: offset = self._filestream.tell() if offset: offset -= 1 else: offset = self._initial_offset return offset
def mem_size(self): '''used when allocating memory ingame''' data_len = self._data_mem_size node_count = len(list(self.xml_doc.iter(tag=etree.Element))) if self.compressed: size = 52 * node_count + data_len + 630 else: tags_len = 0 for e in self.xml_doc.iter(tag=etree.Element): e_len = max(len(e.tag), 8) e_len = (e_len + 3) & ~3 tags_len += e_len size = 56 * node_count + data_len + 630 + tags_len # debugging #print('nodes:{} ({}) data:{} ({})'.format(node_count,hex(node_count), data_len, hex(data_len))) return (size + 8) & ~7
used when allocating memory ingame
Below is the the instruction that describes the task: ### Input: used when allocating memory ingame ### Response: def mem_size(self): '''used when allocating memory ingame''' data_len = self._data_mem_size node_count = len(list(self.xml_doc.iter(tag=etree.Element))) if self.compressed: size = 52 * node_count + data_len + 630 else: tags_len = 0 for e in self.xml_doc.iter(tag=etree.Element): e_len = max(len(e.tag), 8) e_len = (e_len + 3) & ~3 tags_len += e_len size = 56 * node_count + data_len + 630 + tags_len # debugging #print('nodes:{} ({}) data:{} ({})'.format(node_count,hex(node_count), data_len, hex(data_len))) return (size + 8) & ~7
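The bit masks above are alignment arithmetic: (e_len + 3) & ~3 rounds a length up to the next multiple of 4, while the final (size + 8) & ~7 adds between 1 and 8 bytes so the total lands on an 8-byte boundary. A standalone check of just that rounding, independent of the XML handling:

def align4(n):
    # round n up to the next multiple of 4
    return (n + 3) & ~3

def pad8(n):
    # add 1-8 bytes so the result is a multiple of 8
    return (n + 8) & ~7

for n in (1, 4, 5, 8, 13):
    print(n, align4(n), pad8(n))
# 1 -> 4 8, 4 -> 4 8, 5 -> 8 8, 8 -> 8 16, 13 -> 16 16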
def _history_locked(self): """ Returns whether history movement is locked. """ return (self.history_lock and (self._get_edited_history(self._history_index) != self.input_buffer) and (self._get_prompt_cursor().blockNumber() != self._get_end_cursor().blockNumber()))
Returns whether history movement is locked.
Below is the the instruction that describes the task: ### Input: Returns whether history movement is locked. ### Response: def _history_locked(self): """ Returns whether history movement is locked. """ return (self.history_lock and (self._get_edited_history(self._history_index) != self.input_buffer) and (self._get_prompt_cursor().blockNumber() != self._get_end_cursor().blockNumber()))
def create_volume(kwargs=None, call=None, wait_to_finish=False): ''' Create a volume. zone The availability zone used to create the volume. Required. String. size The size of the volume, in GiBs. Defaults to ``10``. Integer. snapshot The snapshot-id from which to create the volume. Integer. type The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. String. iops The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Only valid for Provisioned IOPS SSD volumes. Integer. This option will only be set if ``type`` is also specified as ``io1``. encrypted Specifies whether the volume will be encrypted. Boolean. If ``snapshot`` is also given in the list of kwargs, then this value is ignored since volumes that are created from encrypted snapshots are also automatically encrypted. tags The tags to apply to the volume during creation. Dictionary. call The ``create_volume`` function must be called with ``-f`` or ``--function``. String. wait_to_finish Whether or not to wait for the volume to be available. Boolean. Defaults to ``False``. CLI Examples: .. code-block:: bash salt-cloud -f create_volume my-ec2-config zone=us-east-1b salt-cloud -f create_volume my-ec2-config zone=us-east-1b tags='{"tag1": "val1", "tag2", "val2"}' ''' if call != 'function': log.error( 'The create_volume function must be called with -f or --function.' ) return False if 'zone' not in kwargs: log.error('An availability zone must be specified to create a volume.') return False if 'size' not in kwargs and 'snapshot' not in kwargs: # This number represents GiB kwargs['size'] = '10' params = {'Action': 'CreateVolume', 'AvailabilityZone': kwargs['zone']} if 'size' in kwargs: params['Size'] = kwargs['size'] if 'snapshot' in kwargs: params['SnapshotId'] = kwargs['snapshot'] if 'type' in kwargs: params['VolumeType'] = kwargs['type'] if 'iops' in kwargs and kwargs.get('type', 'standard') == 'io1': params['Iops'] = kwargs['iops'] # You can't set `encrypted` if you pass a snapshot if 'encrypted' in kwargs and 'snapshot' not in kwargs: params['Encrypted'] = kwargs['encrypted'] if 'kmskeyid' in kwargs: params['KmsKeyId'] = kwargs['kmskeyid'] if 'kmskeyid' in kwargs and 'encrypted' not in kwargs: log.error( 'If a KMS Key ID is specified, encryption must be enabled' ) return False log.debug(params) data = aws.query(params, return_url=True, return_root=True, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4') r_data = {} for d in data[0]: for k, v in six.iteritems(d): r_data[k] = v volume_id = r_data['volumeId'] # Allow tags to be set upon creation if 'tags' in kwargs: if isinstance(kwargs['tags'], six.string_types): tags = salt.utils.yaml.safe_load(kwargs['tags']) else: tags = kwargs['tags'] if isinstance(tags, dict): new_tags = set_tags(tags=tags, resource_id=volume_id, call='action', location=get_location()) r_data['tags'] = new_tags # Waits till volume is available if wait_to_finish: salt.utils.cloud.run_func_until_ret_arg(fun=describe_volumes, kwargs={'volume_id': volume_id}, fun_call=call, argument_being_watched='status', required_argument_response='available') return r_data
Create a volume. zone The availability zone used to create the volume. Required. String. size The size of the volume, in GiBs. Defaults to ``10``. Integer. snapshot The snapshot-id from which to create the volume. Integer. type The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. String. iops The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Only valid for Provisioned IOPS SSD volumes. Integer. This option will only be set if ``type`` is also specified as ``io1``. encrypted Specifies whether the volume will be encrypted. Boolean. If ``snapshot`` is also given in the list of kwargs, then this value is ignored since volumes that are created from encrypted snapshots are also automatically encrypted. tags The tags to apply to the volume during creation. Dictionary. call The ``create_volume`` function must be called with ``-f`` or ``--function``. String. wait_to_finish Whether or not to wait for the volume to be available. Boolean. Defaults to ``False``. CLI Examples: .. code-block:: bash salt-cloud -f create_volume my-ec2-config zone=us-east-1b salt-cloud -f create_volume my-ec2-config zone=us-east-1b tags='{"tag1": "val1", "tag2", "val2"}'
Below is the the instruction that describes the task: ### Input: Create a volume. zone The availability zone used to create the volume. Required. String. size The size of the volume, in GiBs. Defaults to ``10``. Integer. snapshot The snapshot-id from which to create the volume. Integer. type The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. String. iops The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Only valid for Provisioned IOPS SSD volumes. Integer. This option will only be set if ``type`` is also specified as ``io1``. encrypted Specifies whether the volume will be encrypted. Boolean. If ``snapshot`` is also given in the list of kwargs, then this value is ignored since volumes that are created from encrypted snapshots are also automatically encrypted. tags The tags to apply to the volume during creation. Dictionary. call The ``create_volume`` function must be called with ``-f`` or ``--function``. String. wait_to_finish Whether or not to wait for the volume to be available. Boolean. Defaults to ``False``. CLI Examples: .. code-block:: bash salt-cloud -f create_volume my-ec2-config zone=us-east-1b salt-cloud -f create_volume my-ec2-config zone=us-east-1b tags='{"tag1": "val1", "tag2", "val2"}' ### Response: def create_volume(kwargs=None, call=None, wait_to_finish=False): ''' Create a volume. zone The availability zone used to create the volume. Required. String. size The size of the volume, in GiBs. Defaults to ``10``. Integer. snapshot The snapshot-id from which to create the volume. Integer. type The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. String. iops The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Only valid for Provisioned IOPS SSD volumes. Integer. This option will only be set if ``type`` is also specified as ``io1``. encrypted Specifies whether the volume will be encrypted. Boolean. If ``snapshot`` is also given in the list of kwargs, then this value is ignored since volumes that are created from encrypted snapshots are also automatically encrypted. tags The tags to apply to the volume during creation. Dictionary. call The ``create_volume`` function must be called with ``-f`` or ``--function``. String. wait_to_finish Whether or not to wait for the volume to be available. Boolean. Defaults to ``False``. CLI Examples: .. code-block:: bash salt-cloud -f create_volume my-ec2-config zone=us-east-1b salt-cloud -f create_volume my-ec2-config zone=us-east-1b tags='{"tag1": "val1", "tag2", "val2"}' ''' if call != 'function': log.error( 'The create_volume function must be called with -f or --function.' 
) return False if 'zone' not in kwargs: log.error('An availability zone must be specified to create a volume.') return False if 'size' not in kwargs and 'snapshot' not in kwargs: # This number represents GiB kwargs['size'] = '10' params = {'Action': 'CreateVolume', 'AvailabilityZone': kwargs['zone']} if 'size' in kwargs: params['Size'] = kwargs['size'] if 'snapshot' in kwargs: params['SnapshotId'] = kwargs['snapshot'] if 'type' in kwargs: params['VolumeType'] = kwargs['type'] if 'iops' in kwargs and kwargs.get('type', 'standard') == 'io1': params['Iops'] = kwargs['iops'] # You can't set `encrypted` if you pass a snapshot if 'encrypted' in kwargs and 'snapshot' not in kwargs: params['Encrypted'] = kwargs['encrypted'] if 'kmskeyid' in kwargs: params['KmsKeyId'] = kwargs['kmskeyid'] if 'kmskeyid' in kwargs and 'encrypted' not in kwargs: log.error( 'If a KMS Key ID is specified, encryption must be enabled' ) return False log.debug(params) data = aws.query(params, return_url=True, return_root=True, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4') r_data = {} for d in data[0]: for k, v in six.iteritems(d): r_data[k] = v volume_id = r_data['volumeId'] # Allow tags to be set upon creation if 'tags' in kwargs: if isinstance(kwargs['tags'], six.string_types): tags = salt.utils.yaml.safe_load(kwargs['tags']) else: tags = kwargs['tags'] if isinstance(tags, dict): new_tags = set_tags(tags=tags, resource_id=volume_id, call='action', location=get_location()) r_data['tags'] = new_tags # Waits till volume is available if wait_to_finish: salt.utils.cloud.run_func_until_ret_arg(fun=describe_volumes, kwargs={'volume_id': volume_id}, fun_call=call, argument_being_watched='status', required_argument_response='available') return r_data
def _mask_cov_func(self, *args): """Masks the covariance function into a form usable by :py:func:`mpmath.diff`. Parameters ---------- *args : `num_dim` * 2 floats The individual elements of Xi and Xj to be passed to :py:attr:`cov_func`. """ # Have to do it in two cases to get the 1d unwrapped properly: if self.num_dim == 1: return self.cov_func(args[0], args[1], *self.params) else: return self.cov_func(args[:self.num_dim], args[self.num_dim:], *self.params)
Masks the covariance function into a form usable by :py:func:`mpmath.diff`. Parameters ---------- *args : `num_dim` * 2 floats The individual elements of Xi and Xj to be passed to :py:attr:`cov_func`.
Below is the the instruction that describes the task: ### Input: Masks the covariance function into a form usable by :py:func:`mpmath.diff`. Parameters ---------- *args : `num_dim` * 2 floats The individual elements of Xi and Xj to be passed to :py:attr:`cov_func`. ### Response: def _mask_cov_func(self, *args): """Masks the covariance function into a form usable by :py:func:`mpmath.diff`. Parameters ---------- *args : `num_dim` * 2 floats The individual elements of Xi and Xj to be passed to :py:attr:`cov_func`. """ # Have to do it in two cases to get the 1d unwrapped properly: if self.num_dim == 1: return self.cov_func(args[0], args[1], *self.params) else: return self.cov_func(args[:self.num_dim], args[self.num_dim:], *self.params)
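The masking exists because mpmath.diff differentiates a function of scalar arguments, so a kernel taking two d-dimensional points has to be exposed as a function of 2*d scalars. A hedged sketch with a toy squared-exponential kernel standing in for cov_func (not the library's own kernel):

import mpmath

def sq_exp(xi, xj):
    # toy 1-D squared-exponential kernel of two scalar arguments
    return mpmath.exp(-(xi - xj) ** 2 / 2)

# partial derivative with respect to xi, evaluated at the point (0.5, 0.0)
print(mpmath.diff(sq_exp, (0.5, 0.0), (1, 0)))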
def reserveIdentifierResponse(self, pid, vendorSpecific=None): """CNCore.getLogRecords(session[, fromDate][, toDate][, event][, start][, count]) → Log https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_APIs.html#CNCore.getLogRecords Implemented in d1_client.baseclient.py. CNCore.reserveIdentifier(session, pid) → Identifier https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.reserveIdentifier Args: pid: vendorSpecific: Returns: """ mmp_dict = {'pid': pid} return self.POST(['reserve', pid], fields=mmp_dict, headers=vendorSpecific)
CNCore.getLogRecords(session[, fromDate][, toDate][, event][, start][, count]) → Log https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_APIs.html#CNCore.getLogRecords Implemented in d1_client.baseclient.py. CNCore.reserveIdentifier(session, pid) → Identifier https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.reserveIdentifier Args: pid: vendorSpecific: Returns:
Below is the the instruction that describes the task: ### Input: CNCore.getLogRecords(session[, fromDate][, toDate][, event][, start][, count]) → Log https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_APIs.html#CNCore.getLogRecords Implemented in d1_client.baseclient.py. CNCore.reserveIdentifier(session, pid) → Identifier https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.reserveIdentifier Args: pid: vendorSpecific: Returns: ### Response: def reserveIdentifierResponse(self, pid, vendorSpecific=None): """CNCore.getLogRecords(session[, fromDate][, toDate][, event][, start][, count]) → Log https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_APIs.html#CNCore.getLogRecords Implemented in d1_client.baseclient.py. CNCore.reserveIdentifier(session, pid) → Identifier https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.reserveIdentifier Args: pid: vendorSpecific: Returns: """ mmp_dict = {'pid': pid} return self.POST(['reserve', pid], fields=mmp_dict, headers=vendorSpecific)
def time(self): "Return the time part, with tzinfo None." return time(self.hour, self.minute, self.second, self.microsecond)
Return the time part, with tzinfo None.
Below is the the instruction that describes the task: ### Input: Return the time part, with tzinfo None. ### Response: def time(self): "Return the time part, with tzinfo None." return time(self.hour, self.minute, self.second, self.microsecond)
def get_next_instance(self, obj): """ Return the next plugin instance for the given object. This differs from `obj.get_next_sibling()` which returns an unsorted sibling. """ ordered_siblings = obj.get_siblings().filter(placeholder=obj.placeholder).order_by('position') pos = list(ordered_siblings).index(obj.cmsplugin_ptr) if pos < ordered_siblings.count() - 1: next_sibling = ordered_siblings[pos + 1] return next_sibling.get_bound_plugin()
Return the next plugin instance for the given object. This differs from `obj.get_next_sibling()` which returns an unsorted sibling.
Below is the the instruction that describes the task: ### Input: Return the next plugin instance for the given object. This differs from `obj.get_next_sibling()` which returns an unsorted sibling. ### Response: def get_next_instance(self, obj): """ Return the next plugin instance for the given object. This differs from `obj.get_next_sibling()` which returns an unsorted sibling. """ ordered_siblings = obj.get_siblings().filter(placeholder=obj.placeholder).order_by('position') pos = list(ordered_siblings).index(obj.cmsplugin_ptr) if pos < ordered_siblings.count() - 1: next_sibling = ordered_siblings[pos + 1] return next_sibling.get_bound_plugin()
def find( cls, name ): """ Finds a particular wizard plugin based on its name. :param name | <str> || None """ if ( cls._plugins is None ): cls._plugins = {} return cls._plugins.get(nativestring(name))
Finds a particular wizard plugin based on its name. :param name | <str> || None
Below is the the instruction that describes the task: ### Input: Finds a particular wizard plugin based on its name. :param name | <str> || None ### Response: def find( cls, name ): """ Finds a particular wizard plugin based on its name. :param name | <str> || None """ if ( cls._plugins is None ): cls._plugins = {} return cls._plugins.get(nativestring(name))
def get_dict_repr(self): """ Return a dictionary representation of this phase. This will be used for checksumming, in order to uniquely compare instance images against their requirements """ return dict( phase_name = self.phase_name, phase_type = self.phase_type, actions = self.actions )
Return a dictionary representation of this phase. This will be used for checksumming, in order to uniquely compare instance images against their requirements
Below is the the instruction that describes the task: ### Input: Return a dictionary representation of this phase. This will be used for checksumming, in order to uniquely compare instance images against their requirements ### Response: def get_dict_repr(self): """ Return a dictionary representation of this phase. This will be used for checksumming, in order to uniquely compare instance images against their requirements """ return dict( phase_name = self.phase_name, phase_type = self.phase_type, actions = self.actions )
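One way such a dict representation might feed into a checksum; the json/hashlib choice and the field values are illustrative and not taken from the original project.

import hashlib
import json

phase_repr = dict(
    phase_name="configure",       # hypothetical phase
    phase_type="setup",
    actions=["install packages", "write config"],
)

# Serialise deterministically (sorted keys) so the same phase definition
# always hashes to the same value.
blob = json.dumps(phase_repr, sort_keys=True).encode("utf-8")
print(hashlib.sha256(blob).hexdigest())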
def from_dict(d): """ Re-create the Specs from a dictionary representation. :param Dict[str, Any] d: The dictionary representation. :return: The restored Specs. :rtype: Specs """ return Specs( qubits_specs=sorted([QubitSpecs(id=int(q), fRO=qspecs.get('fRO'), f1QRB=qspecs.get('f1QRB'), T1=qspecs.get('T1'), T2=qspecs.get('T2'), fActiveReset=qspecs.get('fActiveReset')) for q, qspecs in d["1Q"].items()], key=lambda qubit_specs: qubit_specs.id), edges_specs=sorted([EdgeSpecs(targets=[int(q) for q in e.split('-')], fBellState=especs.get('fBellState'), fCZ=especs.get('fCZ'), fCZ_std_err=especs.get('fCZ_std_err'), fCPHASE=especs.get('fCPHASE')) for e, especs in d["2Q"].items()], key=lambda edge_specs: edge_specs.targets) )
Re-create the Specs from a dictionary representation. :param Dict[str, Any] d: The dictionary representation. :return: The restored Specs. :rtype: Specs
Below is the the instruction that describes the task: ### Input: Re-create the Specs from a dictionary representation. :param Dict[str, Any] d: The dictionary representation. :return: The restored Specs. :rtype: Specs ### Response: def from_dict(d): """ Re-create the Specs from a dictionary representation. :param Dict[str, Any] d: The dictionary representation. :return: The restored Specs. :rtype: Specs """ return Specs( qubits_specs=sorted([QubitSpecs(id=int(q), fRO=qspecs.get('fRO'), f1QRB=qspecs.get('f1QRB'), T1=qspecs.get('T1'), T2=qspecs.get('T2'), fActiveReset=qspecs.get('fActiveReset')) for q, qspecs in d["1Q"].items()], key=lambda qubit_specs: qubit_specs.id), edges_specs=sorted([EdgeSpecs(targets=[int(q) for q in e.split('-')], fBellState=especs.get('fBellState'), fCZ=especs.get('fCZ'), fCZ_std_err=especs.get('fCZ_std_err'), fCPHASE=especs.get('fCPHASE')) for e, especs in d["2Q"].items()], key=lambda edge_specs: edge_specs.targets) )
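A sketch of the dictionary shape from_dict expects, inferred from the constructor calls above; the numbers are made up and the import path is an assumption.

from pyquil.device import Specs   # assumption: adjust to wherever Specs is defined

d = {
    "1Q": {
        "0": {"fRO": 0.95, "f1QRB": 0.99, "T1": 2.0e-5, "T2": 1.5e-5},
        "1": {"fRO": 0.93, "f1QRB": 0.98, "T1": 1.8e-5, "T2": 1.1e-5},
    },
    "2Q": {
        "0-1": {"fBellState": 0.90, "fCZ": 0.88, "fCZ_std_err": 0.01},
    },
}

specs = Specs.from_dict(d)
print(specs.qubits_specs[0].fRO)   # 0.95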
def get_large_image(self, page=1): """ Downloads and returns the large sized image of a single page. The page kwarg specifies which page to return. One is the default. """ url = self.get_large_image_url(page=page) return self._get_url(url)
Downloads and returns the large sized image of a single page. The page kwarg specifies which page to return. One is the default.
Below is the the instruction that describes the task: ### Input: Downloads and returns the large sized image of a single page. The page kwarg specifies which page to return. One is the default. ### Response: def get_large_image(self, page=1): """ Downloads and returns the large sized image of a single page. The page kwarg specifies which page to return. One is the default. """ url = self.get_large_image_url(page=page) return self._get_url(url)
def _null_ac_sia(transition, direction, alpha=0.0): """Return an |AcSystemIrreducibilityAnalysis| with zero |big_alpha| and empty accounts. """ return AcSystemIrreducibilityAnalysis( transition=transition, direction=direction, alpha=alpha, account=(), partitioned_account=() )
Return an |AcSystemIrreducibilityAnalysis| with zero |big_alpha| and empty accounts.
Below is the the instruction that describes the task: ### Input: Return an |AcSystemIrreducibilityAnalysis| with zero |big_alpha| and empty accounts. ### Response: def _null_ac_sia(transition, direction, alpha=0.0): """Return an |AcSystemIrreducibilityAnalysis| with zero |big_alpha| and empty accounts. """ return AcSystemIrreducibilityAnalysis( transition=transition, direction=direction, alpha=alpha, account=(), partitioned_account=() )
def setcontents(source, identifier, pointer): """Patch existing bibliographic record.""" record = Record.get_record(identifier) Document(record, pointer).setcontents(source)
Patch existing bibliographic record.
Below is the the instruction that describes the task: ### Input: Patch existing bibliographic record. ### Response: def setcontents(source, identifier, pointer): """Patch existing bibliographic record.""" record = Record.get_record(identifier) Document(record, pointer).setcontents(source)
def _create_memory_database_interface(self) -> GraphDatabaseInterface: """ Creates and returns the in-memory database interface the graph will use. """ Base = declarative_base() engine = sqlalchemy.create_engine("sqlite://", poolclass=StaticPool) Session = sessionmaker(bind=engine) dbi: GraphDatabaseInterface = create_graph_database_interface( sqlalchemy, Session(), Base, sqlalchemy.orm.relationship ) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) return dbi
Creates and returns the in-memory database interface the graph will use.
Below is the the instruction that describes the task: ### Input: Creates and returns the in-memory database interface the graph will use. ### Response: def _create_memory_database_interface(self) -> GraphDatabaseInterface: """ Creates and returns the in-memory database interface the graph will use. """ Base = declarative_base() engine = sqlalchemy.create_engine("sqlite://", poolclass=StaticPool) Session = sessionmaker(bind=engine) dbi: GraphDatabaseInterface = create_graph_database_interface( sqlalchemy, Session(), Base, sqlalchemy.orm.relationship ) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) return dbi
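The StaticPool choice is the important detail: an in-memory SQLite database lives inside a single connection, so every session has to share that connection or it will see an empty schema. A generic SQLAlchemy sketch of the same setup, without the project-specific graph interface:

import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool

Base = declarative_base()

class Node(Base):
    __tablename__ = "nodes"
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    name = sqlalchemy.Column(sqlalchemy.String)

engine = sqlalchemy.create_engine("sqlite://", poolclass=StaticPool)
Session = sessionmaker(bind=engine)
Base.metadata.create_all(engine)

session = Session()
session.add(Node(name="example"))
session.commit()
print(session.query(Node).count())  # 1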
def createEditor( self, parent, option, index ): """ Creates a new editor for the given index parented to the inputed widget. :param parent | <QWidget> option | <QStyleOption> index | <QModelIndex> :return <QWidget> || None """ # determine the editor to use for the inputed index based on the # table column item = self.parent().itemFromIndex(index) column = self.column(index) # edit based on column preferences if column and \ not column.isReadOnly() and \ isinstance(item, XOrbRecordItem): plugin = self.plugin(column) if not plugin: return None return plugin.createEditor(parent, item.record(), column) return super(XOrbTreeWidgetDelegate, self).createEditor(parent, option, index)
Creates a new editor for the given index parented to the inputed widget. :param parent | <QWidget> option | <QStyleOption> index | <QModelIndex> :return <QWidget> || None
Below is the the instruction that describes the task: ### Input: Creates a new editor for the given index parented to the inputed widget. :param parent | <QWidget> option | <QStyleOption> index | <QModelIndex> :return <QWidget> || None ### Response: def createEditor( self, parent, option, index ): """ Creates a new editor for the given index parented to the inputed widget. :param parent | <QWidget> option | <QStyleOption> index | <QModelIndex> :return <QWidget> || None """ # determine the editor to use for the inputed index based on the # table column item = self.parent().itemFromIndex(index) column = self.column(index) # edit based on column preferences if column and \ not column.isReadOnly() and \ isinstance(item, XOrbRecordItem): plugin = self.plugin(column) if not plugin: return None return plugin.createEditor(parent, item.record(), column) return super(XOrbTreeWidgetDelegate, self).createEditor(parent, option, index)
def Bernoulli(prob_true: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ One to one constructor for mapping some shape of probTrue to a matching shaped Bernoulli. :param prob_true: probTrue with same shape as desired Bernoulli tensor or scalar """ return Boolean(context.jvm_view().BernoulliVertex, label, cast_to_double_vertex(prob_true))
One to one constructor for mapping some shape of probTrue to a matching shaped Bernoulli. :param prob_true: probTrue with same shape as desired Bernoulli tensor or scalar
Below is the the instruction that describes the task: ### Input: One to one constructor for mapping some shape of probTrue to a matching shaped Bernoulli. :param prob_true: probTrue with same shape as desired Bernoulli tensor or scalar ### Response: def Bernoulli(prob_true: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ One to one constructor for mapping some shape of probTrue to a matching shaped Bernoulli. :param prob_true: probTrue with same shape as desired Bernoulli tensor or scalar """ return Boolean(context.jvm_view().BernoulliVertex, label, cast_to_double_vertex(prob_true))
def onPollCreated( self, mid=None, poll=None, author_id=None, thread_id=None, thread_type=None, ts=None, metadata=None, msg=None, ): """ Called when the client is listening, and somebody creates a group poll :param mid: The action ID :param poll: Created poll :param author_id: The ID of the person who created the poll :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads` :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads` :param ts: A timestamp of the action :param metadata: Extra metadata about the action :param msg: A full set of the data recieved :type poll: models.Poll :type thread_type: models.ThreadType """ log.info( "{} created poll {} in {} ({})".format( author_id, poll, thread_id, thread_type.name ) )
Called when the client is listening, and somebody creates a group poll :param mid: The action ID :param poll: Created poll :param author_id: The ID of the person who created the poll :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads` :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads` :param ts: A timestamp of the action :param metadata: Extra metadata about the action :param msg: A full set of the data received :type poll: models.Poll :type thread_type: models.ThreadType
Below is the the instruction that describes the task: ### Input: Called when the client is listening, and somebody creates a group poll :param mid: The action ID :param poll: Created poll :param author_id: The ID of the person who created the poll :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads` :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads` :param ts: A timestamp of the action :param metadata: Extra metadata about the action :param msg: A full set of the data recieved :type poll: models.Poll :type thread_type: models.ThreadType ### Response: def onPollCreated( self, mid=None, poll=None, author_id=None, thread_id=None, thread_type=None, ts=None, metadata=None, msg=None, ): """ Called when the client is listening, and somebody creates a group poll :param mid: The action ID :param poll: Created poll :param author_id: The ID of the person who created the poll :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads` :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads` :param ts: A timestamp of the action :param metadata: Extra metadata about the action :param msg: A full set of the data recieved :type poll: models.Poll :type thread_type: models.ThreadType """ log.info( "{} created poll {} in {} ({})".format( author_id, poll, thread_id, thread_type.name ) )
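Handlers like this are meant to be overridden in a Client subclass; a hedged sketch of that wiring, where the import path and the commented-out login call are assumptions:

from fbchat import Client

class PollLogger(Client):
    def onPollCreated(self, mid=None, poll=None, author_id=None,
                      thread_id=None, thread_type=None, ts=None,
                      metadata=None, msg=None):
        # react to the new poll instead of only logging it
        print("New poll in thread {}: {}".format(thread_id, poll))

# client = PollLogger("email", "password")
# client.listen()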
def discover(self, details = False): 'Discover API definitions. Set details=true to show details' if details and not (isinstance(details, str) and details.lower() == 'false'): return copy.deepcopy(self.discoverinfo) else: return dict((k,v.get('description', '')) for k,v in self.discoverinfo.items())
Discover API definitions. Set details=true to show details
Below is the the instruction that describes the task: ### Input: Discover API definitions. Set details=true to show details ### Response: def discover(self, details = False): 'Discover API definitions. Set details=true to show details' if details and not (isinstance(details, str) and details.lower() == 'false'): return copy.deepcopy(self.discoverinfo) else: return dict((k,v.get('description', '')) for k,v in self.discoverinfo.items())
def getfile(self): """Gets the full file path of the entered/selected file :returns: str -- the name of the data file to open/create """ current_file = str(self.selectedFiles()[0]) if os.path.isfile(current_file): print 'current_file', current_file if current_file.endswith('.raw') or current_file.endswith('.pst'): fmode = 'r' else: fmode = 'a' else: if not current_file.endswith('.hdf5') and not current_file.endswith('.h5'): current_file += '.hdf5' fmode = 'w-' return current_file, fmode
Gets the full file path of the entered/selected file :returns: str -- the name of the data file to open/create
Below is the the instruction that describes the task: ### Input: Gets the full file path of the entered/selected file :returns: str -- the name of the data file to open/create ### Response: def getfile(self): """Gets the full file path of the entered/selected file :returns: str -- the name of the data file to open/create """ current_file = str(self.selectedFiles()[0]) if os.path.isfile(current_file): print 'current_file', current_file if current_file.endswith('.raw') or current_file.endswith('.pst'): fmode = 'r' else: fmode = 'a' else: if not current_file.endswith('.hdf5') and not current_file.endswith('.h5'): current_file += '.hdf5' fmode = 'w-' return current_file, fmode
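The mode selection is the interesting part: existing .raw/.pst files open read-only, other existing files open for append, and new names get an .hdf5 suffix and exclusive creation. A pure-function sketch of that logic, pulled out of the GUI code for illustration:

import os

def choose_mode(path):
    # mirrors the branching above; names here are illustrative
    if os.path.isfile(path):
        return path, "r" if path.endswith((".raw", ".pst")) else "a"
    if not path.endswith((".hdf5", ".h5")):
        path += ".hdf5"
    return path, "w-"

print(choose_mode("session.raw"))   # ('session.raw', 'r') if the file exists,
                                    # otherwise ('session.raw.hdf5', 'w-')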
def _copy(self, axis=True, attr=True, data=False): """Create a new instance of Data, but does not copy the data necessarily. Parameters ---------- axis : bool, optional deep copy the axes (default: True) attr : bool, optional deep copy the attributes (default: True) data : bool, optional deep copy the data (default: False) Returns ------- instance of Data (or ChanTime, ChanFreq, ChanTimeFreq) copy of the data, but without the actual data Notes ----- It's important that we copy all the relevant information here. If you add new attributes, you should add them here. Remember that it deep-copies all the information, so if you copy data the size might become really large. """ cdata = type(self)() # create instance of the same class cdata.s_freq = self.s_freq cdata.start_time = self.start_time if axis: cdata.axis = deepcopy(self.axis) else: cdata_axis = OrderedDict() for axis_name in self.axis: cdata_axis[axis_name] = array([], dtype='O') cdata.axis = cdata_axis if attr: cdata.attr = deepcopy(self.attr) if data: cdata.data = deepcopy(self.data) else: # empty data with the correct number of trials cdata.data = empty(self.number_of('trial'), dtype='O') return cdata
Create a new instance of Data, but does not copy the data necessarily. Parameters ---------- axis : bool, optional deep copy the axes (default: True) attr : bool, optional deep copy the attributes (default: True) data : bool, optional deep copy the data (default: False) Returns ------- instance of Data (or ChanTime, ChanFreq, ChanTimeFreq) copy of the data, but without the actual data Notes ----- It's important that we copy all the relevant information here. If you add new attributes, you should add them here. Remember that it deep-copies all the information, so if you copy data the size might become really large.
Below is the the instruction that describes the task: ### Input: Create a new instance of Data, but does not copy the data necessarily. Parameters ---------- axis : bool, optional deep copy the axes (default: True) attr : bool, optional deep copy the attributes (default: True) data : bool, optional deep copy the data (default: False) Returns ------- instance of Data (or ChanTime, ChanFreq, ChanTimeFreq) copy of the data, but without the actual data Notes ----- It's important that we copy all the relevant information here. If you add new attributes, you should add them here. Remember that it deep-copies all the information, so if you copy data the size might become really large. ### Response: def _copy(self, axis=True, attr=True, data=False): """Create a new instance of Data, but does not copy the data necessarily. Parameters ---------- axis : bool, optional deep copy the axes (default: True) attr : bool, optional deep copy the attributes (default: True) data : bool, optional deep copy the data (default: False) Returns ------- instance of Data (or ChanTime, ChanFreq, ChanTimeFreq) copy of the data, but without the actual data Notes ----- It's important that we copy all the relevant information here. If you add new attributes, you should add them here. Remember that it deep-copies all the information, so if you copy data the size might become really large. """ cdata = type(self)() # create instance of the same class cdata.s_freq = self.s_freq cdata.start_time = self.start_time if axis: cdata.axis = deepcopy(self.axis) else: cdata_axis = OrderedDict() for axis_name in self.axis: cdata_axis[axis_name] = array([], dtype='O') cdata.axis = cdata_axis if attr: cdata.attr = deepcopy(self.attr) if data: cdata.data = deepcopy(self.data) else: # empty data with the correct number of trials cdata.data = empty(self.number_of('trial'), dtype='O') return cdata
def factor_mark(field_name, markers, factors, start=0, end=None): ''' Create a ``DataSpec`` dict that applies a client-side ``CategoricalMarkerMapper`` transformation to a ``ColumnDataSource`` column. .. note:: This transform is primarily only useful with ``scatter``, which can be parameterized by glyph type. Args: field_name (str) : a field name to configure ``DataSpec`` with markers (seq[string]) : a list of markers to use to map to factors (seq) : a sequences of categorical factors corresponding to the palette start (int, optional) : a start slice index to apply when the column data has factors with multiple levels. (default: 0) end (int, optional) : an end slice index to apply when the column data has factors with multiple levels. (default: None) Returns: dict ''' return field(field_name, CategoricalMarkerMapper(markers=markers, factors=factors, start=start, end=end))
Create a ``DataSpec`` dict that applies a client-side ``CategoricalMarkerMapper`` transformation to a ``ColumnDataSource`` column. .. note:: This transform is primarily only useful with ``scatter``, which can be parameterized by glyph type. Args: field_name (str) : a field name to configure ``DataSpec`` with markers (seq[string]) : a list of markers to use to map to factors (seq) : a sequence of categorical factors corresponding to the markers start (int, optional) : a start slice index to apply when the column data has factors with multiple levels. (default: 0) end (int, optional) : an end slice index to apply when the column data has factors with multiple levels. (default: None) Returns: dict
Below is the the instruction that describes the task: ### Input: Create a ``DataSpec`` dict that applies a client-side ``CategoricalMarkerMapper`` transformation to a ``ColumnDataSource`` column. .. note:: This transform is primarily only useful with ``scatter``, which can be parameterized by glyph type. Args: field_name (str) : a field name to configure ``DataSpec`` with markers (seq[string]) : a list of markers to use to map to factors (seq) : a sequences of categorical factors corresponding to the palette start (int, optional) : a start slice index to apply when the column data has factors with multiple levels. (default: 0) end (int, optional) : an end slice index to apply when the column data has factors with multiple levels. (default: None) Returns: dict ### Response: def factor_mark(field_name, markers, factors, start=0, end=None): ''' Create a ``DataSpec`` dict that applies a client-side ``CategoricalMarkerMapper`` transformation to a ``ColumnDataSource`` column. .. note:: This transform is primarily only useful with ``scatter``, which can be parameterized by glyph type. Args: field_name (str) : a field name to configure ``DataSpec`` with markers (seq[string]) : a list of markers to use to map to factors (seq) : a sequences of categorical factors corresponding to the palette start (int, optional) : a start slice index to apply when the column data has factors with multiple levels. (default: 0) end (int, optional) : an end slice index to apply when the column data has factors with multiple levels. (default: None) Returns: dict ''' return field(field_name, CategoricalMarkerMapper(markers=markers, factors=factors, start=start, end=end))
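A short usage sketch pairing the transform with scatter, in the spirit of bokeh's categorical examples; the column name, factor values and marker choices are made up:

from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, show
from bokeh.transform import factor_mark

source = ColumnDataSource(data=dict(
    x=[1, 2, 3, 4],
    y=[4, 3, 2, 1],
    kind=["a", "a", "b", "b"],
))

p = figure(title="factor_mark sketch")
p.scatter("x", "y", size=12, source=source,
          marker=factor_mark("kind", ["circle", "triangle"], ["a", "b"]))
show(p)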
def shutdown(self, force=False): """ Stop executing any further jobs. If the force argument is True, the function does not wait until any queued jobs are completed but stops immediately. After emptying the queue it is restarted, so you may still call run() after using this method. :type force: bool :param force: Whether to wait until all jobs were processed. """ if not force: self.join() self._dbg(2, 'Shutting down queue...') self.workqueue.shutdown(True) self._dbg(2, 'Queue shut down.') self._del_status_bar()
Stop executing any further jobs. If the force argument is True, the function does not wait until any queued jobs are completed but stops immediately. After emptying the queue it is restarted, so you may still call run() after using this method. :type force: bool :param force: Whether to wait until all jobs were processed.
Below is the the instruction that describes the task: ### Input: Stop executing any further jobs. If the force argument is True, the function does not wait until any queued jobs are completed but stops immediately. After emptying the queue it is restarted, so you may still call run() after using this method. :type force: bool :param force: Whether to wait until all jobs were processed. ### Response: def shutdown(self, force=False): """ Stop executing any further jobs. If the force argument is True, the function does not wait until any queued jobs are completed but stops immediately. After emptying the queue it is restarted, so you may still call run() after using this method. :type force: bool :param force: Whether to wait until all jobs were processed. """ if not force: self.join() self._dbg(2, 'Shutting down queue...') self.workqueue.shutdown(True) self._dbg(2, 'Queue shut down.') self._del_status_bar()
def find_version_by_string_lib(line): # type: (str)->Optional[str] """ No regex parsing. Or at least, mostly, not regex. """ if not line: return None simplified_line = simplify_line(line) version = None if simplified_line.startswith("version="): if '"' not in simplified_line: pass # logger.debug("Weird version string, no double quote : " + unicode((full_path, line, simplified_line))) else: if "=" in simplified_line: post_equals = simplified_line.split("=")[0] if '"' in post_equals: parts = post_equals.split('"') if len(parts) != 3: # logger.debug("Weird string, more than 3 parts : " + unicode((full_path, line, simplified_line))) version = parts[0] return version
No regex parsing. Or at least, mostly, not regex.
Below is the the instruction that describes the task: ### Input: No regex parsing. Or at least, mostly, not regex. ### Response: def find_version_by_string_lib(line): # type: (str)->Optional[str] """ No regex parsing. Or at least, mostly, not regex. """ if not line: return None simplified_line = simplify_line(line) version = None if simplified_line.startswith("version="): if '"' not in simplified_line: pass # logger.debug("Weird version string, no double quote : " + unicode((full_path, line, simplified_line))) else: if "=" in simplified_line: post_equals = simplified_line.split("=")[0] if '"' in post_equals: parts = post_equals.split('"') if len(parts) != 3: # logger.debug("Weird string, more than 3 parts : " + unicode((full_path, line, simplified_line))) version = parts[0] return version
def generateLowerBoundList(confidence, numUniqueFeatures, numLocationsPerObject, maxNumObjects): """ Metric: How unique is each object's most unique feature? Calculate the probabilistic lower bound for the number of occurrences of an object's most unique feature. For example, if confidence is 0.8, the tick "3" will be placed at the point where 80% of objects are completely composed of features with 3 or more total occurrences, and 20% of objects have at least one feature that has 2 or fewer total occurrences. """ # We're choosing a location, checking its feature, and checking how many # *other* occurrences there are of this feature. So we check n - 1 locations. maxNumOtherLocations = maxNumObjects*10 - 1 results = zip(itertools.count(1), findBinomialNsWithLowerBoundSampleMinimum( confidence, itertools.count(1), 1./numUniqueFeatures, numLocationsPerObject, maxNumOtherLocations)) finalResults = [(numOtherLocations, interpolatedN / numLocationsPerObject) for numOtherLocations, (interpolatedN, _, _) in results] return finalResults
Metric: How unique is each object's most unique feature? Calculate the probabilistic lower bound for the number of occurrences of an object's most unique feature. For example, if confidence is 0.8, the tick "3" will be placed at the point where 80% of objects are completely composed of features with 3 or more total occurrences, and 20% of objects have at least one feature that has 2 or fewer total occurrences.
Below is the the instruction that describes the task: ### Input: Metric: How unique is each object's most unique feature? Calculate the probabilistic lower bound for the number of occurrences of an object's most unique feature. For example, if confidence is 0.8, the tick "3" will be placed at the point where 80% of objects are completely composed of features with 3 or more total occurrences, and 20% of objects have at least one feature that has 2 or fewer total occurrences. ### Response: def generateLowerBoundList(confidence, numUniqueFeatures, numLocationsPerObject, maxNumObjects): """ Metric: How unique is each object's most unique feature? Calculate the probabilistic lower bound for the number of occurrences of an object's most unique feature. For example, if confidence is 0.8, the tick "3" will be placed at the point where 80% of objects are completely composed of features with 3 or more total occurrences, and 20% of objects have at least one feature that has 2 or fewer total occurrences. """ # We're choosing a location, checking its feature, and checking how many # *other* occurrences there are of this feature. So we check n - 1 locations. maxNumOtherLocations = maxNumObjects*10 - 1 results = zip(itertools.count(1), findBinomialNsWithLowerBoundSampleMinimum( confidence, itertools.count(1), 1./numUniqueFeatures, numLocationsPerObject, maxNumOtherLocations)) finalResults = [(numOtherLocations, interpolatedN / numLocationsPerObject) for numOtherLocations, (interpolatedN, _, _) in results] return finalResults
def ext_pillar(minion_id, # pylint: disable=W0613 pillar, # pylint: disable=W0613 config_file): ''' Execute LDAP searches and return the aggregated data ''' config_template = None try: config_template = _render_template(config_file) except jinja2.exceptions.TemplateNotFound: log.debug('pillar_ldap: missing configuration file %s', config_file) except Exception: log.debug('pillar_ldap: failed to render template for %s', config_file, exc_info=True) if not config_template: # We don't have a config file return {} import salt.utils.yaml try: opts = salt.utils.yaml.safe_load(config_template) or {} opts['conf_file'] = config_file except Exception as err: import salt.log msg = 'pillar_ldap: error parsing configuration file: {0} - {1}'.format( config_file, err ) if salt.log.is_console_configured(): log.warning(msg) else: print(msg) return {} else: if not isinstance(opts, dict): log.warning( 'pillar_ldap: %s is invalidly formatted, must be a YAML ' 'dictionary. See the documentation for more information.', config_file ) return {} if 'search_order' not in opts: log.warning( 'pillar_ldap: search_order missing from configuration. See the ' 'documentation for more information.' ) return {} data = {} for source in opts['search_order']: config = opts[source] result = _do_search(config) log.debug('source %s got result %s', source, result) if result: data = _result_to_dict(data, result, config, source) return data
Execute LDAP searches and return the aggregated data
Below is the the instruction that describes the task: ### Input: Execute LDAP searches and return the aggregated data ### Response: def ext_pillar(minion_id, # pylint: disable=W0613 pillar, # pylint: disable=W0613 config_file): ''' Execute LDAP searches and return the aggregated data ''' config_template = None try: config_template = _render_template(config_file) except jinja2.exceptions.TemplateNotFound: log.debug('pillar_ldap: missing configuration file %s', config_file) except Exception: log.debug('pillar_ldap: failed to render template for %s', config_file, exc_info=True) if not config_template: # We don't have a config file return {} import salt.utils.yaml try: opts = salt.utils.yaml.safe_load(config_template) or {} opts['conf_file'] = config_file except Exception as err: import salt.log msg = 'pillar_ldap: error parsing configuration file: {0} - {1}'.format( config_file, err ) if salt.log.is_console_configured(): log.warning(msg) else: print(msg) return {} else: if not isinstance(opts, dict): log.warning( 'pillar_ldap: %s is invalidly formatted, must be a YAML ' 'dictionary. See the documentation for more information.', config_file ) return {} if 'search_order' not in opts: log.warning( 'pillar_ldap: search_order missing from configuration. See the ' 'documentation for more information.' ) return {} data = {} for source in opts['search_order']: config = opts[source] result = _do_search(config) log.debug('source %s got result %s', source, result) if result: data = _result_to_dict(data, result, config, source) return data
def get_system_uptime_output_show_system_uptime_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_system_uptime = ET.Element("get_system_uptime") config = get_system_uptime output = ET.SubElement(get_system_uptime, "output") show_system_uptime = ET.SubElement(output, "show-system-uptime") rbridge_id = ET.SubElement(show_system_uptime, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def get_system_uptime_output_show_system_uptime_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_system_uptime = ET.Element("get_system_uptime") config = get_system_uptime output = ET.SubElement(get_system_uptime, "output") show_system_uptime = ET.SubElement(output, "show-system-uptime") rbridge_id = ET.SubElement(show_system_uptime, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
def export_module_spec_with_checkpoint(module_spec, checkpoint_path, export_path, scope_prefix=""): """Exports given checkpoint as tfhub module with given spec.""" # The main requirement is that it is possible to know how to map from # module variable name to checkpoint variable name. # This is trivial if the original code used variable scopes, # but can be messy if the variables to export are interwined # with variables not export. with tf.Graph().as_default(): m = hub.Module(module_spec) assign_map = { scope_prefix + name: value for name, value in m.variable_map.items() } tf.train.init_from_checkpoint(checkpoint_path, assign_map) init_op = tf.initializers.global_variables() with tf.Session() as session: session.run(init_op) m.export(export_path, session)
Exports given checkpoint as tfhub module with given spec.
Below is the the instruction that describes the task: ### Input: Exports given checkpoint as tfhub module with given spec. ### Response: def export_module_spec_with_checkpoint(module_spec, checkpoint_path, export_path, scope_prefix=""): """Exports given checkpoint as tfhub module with given spec.""" # The main requirement is that it is possible to know how to map from # module variable name to checkpoint variable name. # This is trivial if the original code used variable scopes, # but can be messy if the variables to export are interwined # with variables not export. with tf.Graph().as_default(): m = hub.Module(module_spec) assign_map = { scope_prefix + name: value for name, value in m.variable_map.items() } tf.train.init_from_checkpoint(checkpoint_path, assign_map) init_op = tf.initializers.global_variables() with tf.Session() as session: session.run(init_op) m.export(export_path, session)
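A hedged call sketch for the exporter above; the module definition is a toy, and the checkpoint path, export path and scope prefix are placeholders that must match how the variables were actually named during training:

import tensorflow as tf
import tensorflow_hub as hub

def module_fn():
    x = tf.placeholder(tf.float32, shape=[None, 4])
    w = tf.get_variable("w", shape=[4, 2])
    hub.add_signature(inputs=x, outputs=tf.matmul(x, w))

spec = hub.create_module_spec(module_fn)
export_module_spec_with_checkpoint(
    spec,
    checkpoint_path="/tmp/model/ckpt-1000",    # hypothetical checkpoint holding a variable "model/w"
    export_path="/tmp/exported_hub_module",
    scope_prefix="model/",
)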
def gen_smul(src1, src2, dst): """Return a SMUL instruction. """ assert src1.size == src2.size return ReilBuilder.build(ReilMnemonic.SMUL, src1, src2, dst)
Return a SMUL instruction.
Below is the the instruction that describes the task: ### Input: Return a SMUL instruction. ### Response: def gen_smul(src1, src2, dst): """Return a SMUL instruction. """ assert src1.size == src2.size return ReilBuilder.build(ReilMnemonic.SMUL, src1, src2, dst)
def update(self, path): """ Update the attributes of this CartItem. """ self._reset() self.path = path self._refresh_synced() if self.is_synced: self._refresh_path() self._refresh_signed() self._refresh_nvr()
Update the attributes of this CartItem.
Below is the the instruction that describes the task: ### Input: Update the attributes of this CartItem. ### Response: def update(self, path): """ Update the attributes of this CartItem. """ self._reset() self.path = path self._refresh_synced() if self.is_synced: self._refresh_path() self._refresh_signed() self._refresh_nvr()
def urlencode(txt): """Url encode a path.""" if isinstance(txt, unicode): txt = txt.encode('utf-8') return urllib.quote_plus(txt)
Url encode a path.
Below is the the instruction that describes the task: ### Input: Url encode a path. ### Response: def urlencode(txt): """Url encode a path.""" if isinstance(txt, unicode): txt = txt.encode('utf-8') return urllib.quote_plus(txt)
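The helper above is Python 2 (unicode, urllib.quote_plus); a Python 3 equivalent, offered as an aside rather than a drop-in change to the original:

from urllib.parse import quote_plus

def urlencode_py3(txt):
    """Url encode a path (Python 3)."""
    return quote_plus(txt)

print(urlencode_py3("a path/with spaces & symbols"))  # a+path%2Fwith+spaces+%26+symbols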
def bulk_insert(self, rows, return_model=False): """Creates multiple new records in the database. This allows specifying custom conflict behavior using .on_conflict(). If no special behavior was specified, this uses the normal Django create(..) Arguments: rows: An array of dictionaries, where each dictionary describes the fields to insert. return_model (default: False): If model instances should be returned rather than just dicts. Returns: A list of either the dicts of the rows inserted, including the pk or the models of the rows inserted with defaults for any fields not specified """ if self.conflict_target or self.conflict_action: compiler = self._build_insert_compiler(rows) objs = compiler.execute_sql(return_id=True) if return_model: return [self.model(**dict(r, **k)) for r, k in zip(rows, objs)] else: return [dict(r, **k) for r, k in zip(rows, objs)] # no special action required, use the standard Django bulk_create(..) return super().bulk_create([self.model(**fields) for fields in rows])
Creates multiple new records in the database. This allows specifying custom conflict behavior using .on_conflict(). If no special behavior was specified, this uses the normal Django bulk_create(..) Arguments: rows: An array of dictionaries, where each dictionary describes the fields to insert. return_model (default: False): Whether model instances should be returned rather than just dicts. Returns: A list of either the dicts of the rows inserted, including the pk or the models of the rows inserted with defaults for any fields not specified
Below is the the instruction that describes the task: ### Input: Creates multiple new records in the database. This allows specifying custom conflict behavior using .on_conflict(). If no special behavior was specified, this uses the normal Django create(..) Arguments: rows: An array of dictionaries, where each dictionary describes the fields to insert. return_model (default: False): If model instances should be returned rather than just dicts. Returns: A list of either the dicts of the rows inserted, including the pk or the models of the rows inserted with defaults for any fields not specified ### Response: def bulk_insert(self, rows, return_model=False): """Creates multiple new records in the database. This allows specifying custom conflict behavior using .on_conflict(). If no special behavior was specified, this uses the normal Django create(..) Arguments: rows: An array of dictionaries, where each dictionary describes the fields to insert. return_model (default: False): If model instances should be returned rather than just dicts. Returns: A list of either the dicts of the rows inserted, including the pk or the models of the rows inserted with defaults for any fields not specified """ if self.conflict_target or self.conflict_action: compiler = self._build_insert_compiler(rows) objs = compiler.execute_sql(return_id=True) if return_model: return [self.model(**dict(r, **k)) for r, k in zip(rows, objs)] else: return [dict(r, **k) for r, k in zip(rows, objs)] # no special action required, use the standard Django bulk_create(..) return super().bulk_create([self.model(**fields) for fields in rows])
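A hedged usage sketch in the style of django-postgres-extra, which this queryset method appears to come from; the model and field names are invented and the import path is an assumption.

from psqlextra.types import ConflictAction   # assumption about where ConflictAction lives

rows = [
    dict(slug="first", title="First"),
    dict(slug="second", title="Second"),
]

# upsert on the unique "slug" column and get model instances back
objs = (
    MyModel.objects                            # MyModel is a hypothetical model using this queryset
    .on_conflict(["slug"], ConflictAction.UPDATE)
    .bulk_insert(rows, return_model=True)
)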
def get_all_user(self, **kwargs):  # noqa: E501
    """Get all users  # noqa: E501

    Returns all users  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_user(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: list[UserModel]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_all_user_with_http_info(**kwargs)  # noqa: E501
    else:
        (data) = self.get_all_user_with_http_info(**kwargs)  # noqa: E501
        return data
Get all users # noqa: E501 Returns all users # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_user(async_req=True) >>> result = thread.get() :param async_req bool :return: list[UserModel] If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: Get all users # noqa: E501 Returns all users # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_user(async_req=True) >>> result = thread.get() :param async_req bool :return: list[UserModel] If the method is called asynchronously, returns the request thread. ### Response: def get_all_user(self, **kwargs): # noqa: E501 """Get all users # noqa: E501 Returns all users # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_user(async_req=True) >>> result = thread.get() :param async_req bool :return: list[UserModel] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_all_user_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_all_user_with_http_info(**kwargs) # noqa: E501 return data
def _get_keycachelike(self, keycache, keys, get_adds_dels, parentity, branch, turn, tick, *, forward): """Try to retrieve a frozenset representing extant keys. If I can't, generate one, store it, and return it. """ keycache_key = parentity + (branch,) keycache2 = keycache3 = None if keycache_key in keycache: keycache2 = keycache[keycache_key] if turn in keycache2: keycache3 = keycache2[turn] if tick in keycache3: return keycache3[tick] if forward: # Take valid values from the past of a keycache and copy them forward, into the present. # Assumes that time is only moving forward, never backward, never skipping any turns or ticks, # and any changes to the world state are happening through allegedb proper, meaning they'll all get cached. # In LiSE this means every change to the world state should happen inside of a call to # ``Engine.next_turn`` in a rule. if keycache2 and keycache2.rev_gettable(turn): # there's a keycache from a prior turn in this branch. Get it if turn not in keycache2: # since it's not this *exact* turn there might be changes... old_turn = keycache2.rev_before(turn) old_turn_kc = keycache2[turn] added, deleted = get_adds_dels( keys[parentity], branch, turn, tick, stoptime=( branch, old_turn, old_turn_kc.end ) ) ret = old_turn_kc[old_turn_kc.end].union(added).difference(deleted) # assert ret == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow new_turn_kc = WindowDict() new_turn_kc[tick] = ret keycache2[turn] = new_turn_kc return ret if not keycache3: keycache3 = keycache2[turn] if tick not in keycache3: if keycache3.rev_gettable(tick): added, deleted = get_adds_dels( keys[parentity], branch, turn, tick, stoptime=( branch, turn, keycache3.rev_before(tick) ) ) ret = keycache3[tick].union(added).difference(deleted) # assert ret == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow keycache3[tick] = ret return ret else: turn_before = keycache2.rev_before(turn) tick_before = keycache2[turn_before].end keys_before = keycache2[turn_before][tick_before] added, deleted = get_adds_dels( keys[parentity], branch, turn, tick, stoptime=( branch, turn_before, tick_before ) ) ret = keycache3[tick] = keys_before.union(added).difference(deleted) # assert ret == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow return ret # assert kcturn[tick] == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow return keycache3[tick] else: for (parbranch, parturn, partick) in self.db._iter_parent_btt(branch, turn, tick): par_kc_key = parentity + (parbranch,) if par_kc_key in keycache: kcpkc = keycache[par_kc_key] if parturn in kcpkc and kcpkc[parturn].rev_gettable(partick): parkeys = kcpkc[parturn][partick] break elif kcpkc.rev_gettable(parturn-1): partkeys = kcpkc[parturn-1] parkeys = partkeys[partkeys.end] break else: parkeys = frozenset() keycache2 = SettingsTurnDict() added, deleted = get_adds_dels( keys[parentity], branch, turn, tick, stoptime=( parbranch, parturn, partick ) ) ret = parkeys.union(added).difference(deleted) keycache2[turn] = {tick: ret} keycache[keycache_key] = keycache2 # assert ret == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow return ret ret = frozenset(get_adds_dels(keys[parentity], branch, turn, tick)[0]) if keycache2: if keycache3: keycache3[tick] = ret else: keycache2[turn] = {tick: ret} else: kcc = SettingsTurnDict() kcc[turn] = {tick: ret} keycache[keycache_key] = kcc return ret
Try to retrieve a frozenset representing extant keys. If I can't, generate one, store it, and return it.
Below is the the instruction that describes the task: ### Input: Try to retrieve a frozenset representing extant keys. If I can't, generate one, store it, and return it. ### Response: def _get_keycachelike(self, keycache, keys, get_adds_dels, parentity, branch, turn, tick, *, forward): """Try to retrieve a frozenset representing extant keys. If I can't, generate one, store it, and return it. """ keycache_key = parentity + (branch,) keycache2 = keycache3 = None if keycache_key in keycache: keycache2 = keycache[keycache_key] if turn in keycache2: keycache3 = keycache2[turn] if tick in keycache3: return keycache3[tick] if forward: # Take valid values from the past of a keycache and copy them forward, into the present. # Assumes that time is only moving forward, never backward, never skipping any turns or ticks, # and any changes to the world state are happening through allegedb proper, meaning they'll all get cached. # In LiSE this means every change to the world state should happen inside of a call to # ``Engine.next_turn`` in a rule. if keycache2 and keycache2.rev_gettable(turn): # there's a keycache from a prior turn in this branch. Get it if turn not in keycache2: # since it's not this *exact* turn there might be changes... old_turn = keycache2.rev_before(turn) old_turn_kc = keycache2[turn] added, deleted = get_adds_dels( keys[parentity], branch, turn, tick, stoptime=( branch, old_turn, old_turn_kc.end ) ) ret = old_turn_kc[old_turn_kc.end].union(added).difference(deleted) # assert ret == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow new_turn_kc = WindowDict() new_turn_kc[tick] = ret keycache2[turn] = new_turn_kc return ret if not keycache3: keycache3 = keycache2[turn] if tick not in keycache3: if keycache3.rev_gettable(tick): added, deleted = get_adds_dels( keys[parentity], branch, turn, tick, stoptime=( branch, turn, keycache3.rev_before(tick) ) ) ret = keycache3[tick].union(added).difference(deleted) # assert ret == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow keycache3[tick] = ret return ret else: turn_before = keycache2.rev_before(turn) tick_before = keycache2[turn_before].end keys_before = keycache2[turn_before][tick_before] added, deleted = get_adds_dels( keys[parentity], branch, turn, tick, stoptime=( branch, turn_before, tick_before ) ) ret = keycache3[tick] = keys_before.union(added).difference(deleted) # assert ret == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow return ret # assert kcturn[tick] == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow return keycache3[tick] else: for (parbranch, parturn, partick) in self.db._iter_parent_btt(branch, turn, tick): par_kc_key = parentity + (parbranch,) if par_kc_key in keycache: kcpkc = keycache[par_kc_key] if parturn in kcpkc and kcpkc[parturn].rev_gettable(partick): parkeys = kcpkc[parturn][partick] break elif kcpkc.rev_gettable(parturn-1): partkeys = kcpkc[parturn-1] parkeys = partkeys[partkeys.end] break else: parkeys = frozenset() keycache2 = SettingsTurnDict() added, deleted = get_adds_dels( keys[parentity], branch, turn, tick, stoptime=( parbranch, parturn, partick ) ) ret = parkeys.union(added).difference(deleted) keycache2[turn] = {tick: ret} keycache[keycache_key] = keycache2 # assert ret == get_adds_dels(keys[parentity], branch, turn, tick)[0] # slow return ret ret = frozenset(get_adds_dels(keys[parentity], branch, turn, tick)[0]) if keycache2: if keycache3: keycache3[tick] = ret else: keycache2[turn] = {tick: ret} else: kcc = SettingsTurnDict() kcc[turn] = {tick: ret} 
keycache[keycache_key] = kcc return ret
def _file_not_empty(tmpfile):
    """
    Returns True if file exists and it is not empty
    to check if it is time to read container ID from cidfile

    :param tmpfile: str, path to file
    :return: bool, True if container id is written to the file
    """
    if os.path.exists(tmpfile):
        return os.stat(tmpfile).st_size != 0
    else:
        return False
Returns True if file exists and it is not empty to check if it is time to read container ID from cidfile :param tmpfile: str, path to file :return: bool, True if container id is written to the file
Below is the the instruction that describes the task: ### Input: Returns True if file exists and it is not empty to check if it is time to read container ID from cidfile :param tmpfile: str, path to file :return: bool, True if container id is written to the file ### Response: def _file_not_empty(tmpfile): """ Returns True if file exists and it is not empty to check if it is time to read container ID from cidfile :param tmpfile: str, path to file :return: bool, True if container id is written to the file """ if os.path.exists(tmpfile): return os.stat(tmpfile).st_size != 0 else: return False
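A quick, self-contained way to exercise the same check, assuming a temporary file stands in for the cidfile (the container id string is made up):

```python
import os
import tempfile

def file_not_empty(path):
    # st_size is 0 while the file exists but nothing has been written yet
    return os.path.exists(path) and os.stat(path).st_size != 0

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    cid_file = tmp.name

print(file_not_empty(cid_file))   # False: the file exists but is still empty
with open(cid_file, "w") as fh:
    fh.write("3f2a9c1d")          # pretend the runtime wrote a container id
print(file_not_empty(cid_file))   # True
os.remove(cid_file)
```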
def is_kanji(data):
    """\
    Returns if the `data` can be encoded in "kanji" mode.

    :param bytes data: The data to check.
    :rtype: bool
    """
    data_len = len(data)
    if not data_len or data_len % 2:
        return False
    if _PY2:
        data = (ord(c) for c in data)
    data_iter = iter(data)
    for i in range(0, data_len, 2):
        code = (next(data_iter) << 8) | next(data_iter)
        if not (0x8140 <= code <= 0x9ffc or 0xe040 <= code <= 0xebbf):
            return False
    return True
\ Returns if the `data` can be encoded in "kanji" mode. :param bytes data: The data to check. :rtype: bool
Below is the the instruction that describes the task: ### Input: \ Returns if the `data` can be encoded in "kanji" mode. :param bytes data: The data to check. :rtype: bool ### Response: def is_kanji(data): """\ Returns if the `data` can be encoded in "kanji" mode. :param bytes data: The data to check. :rtype: bool """ data_len = len(data) if not data_len or data_len % 2: return False if _PY2: data = (ord(c) for c in data) data_iter = iter(data) for i in range(0, data_len, 2): code = (next(data_iter) << 8) | next(data_iter) if not (0x8140 <= code <= 0x9ffc or 0xe040 <= code <= 0xebbf): return False return True
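A self-contained Python 3 sketch of the same Shift-JIS range test (the `_PY2` branch is dropped), checked against bytes produced by encoding real kanji:

```python
def is_kanji(data: bytes) -> bool:
    if not data or len(data) % 2:
        return False
    it = iter(data)
    for high in it:
        code = (high << 8) | next(it)          # join each byte pair into one code
        if not (0x8140 <= code <= 0x9FFC or 0xE040 <= code <= 0xEBBF):
            return False
    return True

print(is_kanji("漢字".encode("shift_jis")))   # True: both codes fall inside 0x8140-0x9FFC
print(is_kanji(b"abc"))                       # False: odd number of bytes
```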
def __build_level(self, previous_level_blocks, level):
    """!
    @brief Build new level of directory.

    @param[in] previous_level_blocks (list): BANG-blocks on the previous level.
    @param[in] level (uint): Level number that should be built.

    @return (list) New block on the specified level.

    """
    current_level_blocks = []

    split_dimension = level % len(self.__data[0])
    cache_require = (level == self.__levels - 1)

    for block in previous_level_blocks:
        self.__split_block(block, split_dimension, cache_require, current_level_blocks)

    if cache_require:
        self.__leafs += current_level_blocks

    return current_level_blocks
! @brief Build new level of directory. @param[in] previous_level_blocks (list): BANG-blocks on the previous level. @param[in] level (uint): Level number that should be built. @return (list) New block on the specified level.
Below is the the instruction that describes the task: ### Input: ! @brief Build new level of directory. @param[in] previous_level_blocks (list): BANG-blocks on the previous level. @param[in] level (uint): Level number that should be built. @return (list) New block on the specified level. ### Response: def __build_level(self, previous_level_blocks, level): """! @brief Build new level of directory. @param[in] previous_level_blocks (list): BANG-blocks on the previous level. @param[in] level (uint): Level number that should be built. @return (list) New block on the specified level. """ current_level_blocks = [] split_dimension = level % len(self.__data[0]) cache_require = (level == self.__levels - 1) for block in previous_level_blocks: self.__split_block(block, split_dimension, cache_require, current_level_blocks) if cache_require: self.__leafs += current_level_blocks return current_level_blocks
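The only non-obvious step is how the split dimension is chosen: it simply cycles through the data's dimensions as the level number grows. A tiny illustration:

```python
dimensions = 3          # e.g. len(data[0]) for 3-dimensional points
for level in range(6):
    print("level", level, "splits on dimension", level % dimensions)
# level 0 -> 0, 1 -> 1, 2 -> 2, 3 -> 0, 4 -> 1, 5 -> 2
```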
def quaternion_inverse(quaternion):
    """Return inverse of quaternion.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    numpy.negative(q[1:], q[1:])
    return q / numpy.dot(q, q)
Return inverse of quaternion. >>> q0 = random_quaternion() >>> q1 = quaternion_inverse(q0) >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0]) True
Below is the the instruction that describes the task: ### Input: Return inverse of quaternion. >>> q0 = random_quaternion() >>> q1 = quaternion_inverse(q0) >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0]) True ### Response: def quaternion_inverse(quaternion): """Return inverse of quaternion. >>> q0 = random_quaternion() >>> q1 = quaternion_inverse(q0) >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0]) True """ q = numpy.array(quaternion, dtype=numpy.float64, copy=True) numpy.negative(q[1:], q[1:]) return q / numpy.dot(q, q)
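A quick numeric check, assuming the [w, x, y, z] ordering used above; for a unit quaternion the inverse is just the conjugate:

```python
import numpy

def quaternion_inverse(quaternion):
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    numpy.negative(q[1:], q[1:])     # negate the vector part (conjugate)
    return q / numpy.dot(q, q)       # divide by the squared norm

q = [0.5, 0.5, 0.5, 0.5]             # a unit-norm quaternion
print(quaternion_inverse(q))         # [ 0.5 -0.5 -0.5 -0.5]
```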
def decode(self, longval, nbits):
    """Decode the number to a string using the given statistics.

    Parameters
    ----------
    longval : int
        The first part of an encoded tuple from encode
    nbits : int
        The second part of an encoded tuple from encode

    Returns
    -------
    str
        The arithmetically decoded text

    Example
    -------
    >>> ac = Arithmetic('the quick brown fox jumped over the lazy dog')
    >>> ac.decode(16720586181, 34)
    'align'

    """
    val = Fraction(longval, long(1) << nbits)
    letters = []

    probs_items = [
        (char, minval, maxval)
        for (char, (minval, maxval)) in self._probs.items()
    ]

    char = '\x00'
    while True:
        for (char, minval, maxval) in probs_items:  # noqa: B007
            if minval <= val < maxval:
                break

        if char == '\x00':
            break
        letters.append(char)
        delta = maxval - minval
        val = (val - minval) / delta

    return ''.join(letters)
Decode the number to a string using the given statistics. Parameters ---------- longval : int The first part of an encoded tuple from encode nbits : int The second part of an encoded tuple from encode Returns ------- str The arithmetically decoded text Example ------- >>> ac = Arithmetic('the quick brown fox jumped over the lazy dog') >>> ac.decode(16720586181, 34) 'align'
Below is the the instruction that describes the task: ### Input: Decode the number to a string using the given statistics. Parameters ---------- longval : int The first part of an encoded tuple from encode nbits : int The second part of an encoded tuple from encode Returns ------- str The arithmetically decoded text Example ------- >>> ac = Arithmetic('the quick brown fox jumped over the lazy dog') >>> ac.decode(16720586181, 34) 'align' ### Response: def decode(self, longval, nbits): """Decode the number to a string using the given statistics. Parameters ---------- longval : int The first part of an encoded tuple from encode nbits : int The second part of an encoded tuple from encode Returns ------- str The arithmetically decoded text Example ------- >>> ac = Arithmetic('the quick brown fox jumped over the lazy dog') >>> ac.decode(16720586181, 34) 'align' """ val = Fraction(longval, long(1) << nbits) letters = [] probs_items = [ (char, minval, maxval) for (char, (minval, maxval)) in self._probs.items() ] char = '\x00' while True: for (char, minval, maxval) in probs_items: # noqa: B007 if minval <= val < maxval: break if char == '\x00': break letters.append(char) delta = maxval - minval val = (val - minval) / delta return ''.join(letters)
def _load_converted_gssha_data_from_lsm(self, gssha_var, lsm_var, load_type, time_step=None): """ This function loads data from LSM and converts to GSSHA format """ if 'radiation' in gssha_var: conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type] if gssha_var.startswith('direct_radiation') and not isinstance(lsm_var, basestring): # direct_radiation = (1-DIFFUSIVE_FRACION)*global_radiation global_radiation_var, diffusive_fraction_var = lsm_var global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor) diffusive_fraction = self._load_lsm_data(diffusive_fraction_var) if gssha_var.endswith("cc"): diffusive_fraction /= 100.0 self.data = ((1-diffusive_fraction)*global_radiation) elif gssha_var.startswith('diffusive_radiation') and not isinstance(lsm_var, basestring): # diffusive_radiation = DIFFUSIVE_FRACION*global_radiation global_radiation_var, diffusive_fraction_var = lsm_var global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor) diffusive_fraction = self._load_lsm_data(diffusive_fraction_var) if gssha_var.endswith("cc"): diffusive_fraction /= 100 self.data = (diffusive_fraction*global_radiation) elif isinstance(lsm_var, basestring): self.data = self._load_lsm_data(lsm_var, self.netcdf_attributes[gssha_var]['conversion_factor'][load_type]) else: raise ValueError("Invalid LSM variable ({0}) for GSSHA variable {1}".format(lsm_var, gssha_var)) elif gssha_var == 'relative_humidity' and not isinstance(lsm_var, str): ##CONVERSION ASSUMPTIONS: ##1) These equations are for liquid water and are less accurate below 0 deg C ##2) Not adjusting the pressure for the fact that the temperature ## and moisture measurements are given at 2 m AGL. ##Neither of these should have a significant impact on RH values ##given the uncertainty in the model values themselves. 
specific_humidity_var, pressure_var, temperature_var = lsm_var specific_humidity = self._load_lsm_data(specific_humidity_var) pressure = self._load_lsm_data(pressure_var) temperature = self._load_lsm_data(temperature_var) ##To compute the relative humidity at 2m, ##given T, Q (water vapor mixing ratio) at 2 m and PSFC (surface pressure): ##Es(saturation vapor pressure in Pa) ##Qs(saturation mixing ratio)=(0.622*es)/(PSFC-es) ##RH = 100*Q/Qs es = esat(temperature) self.data = 100 * specific_humidity/((0.622*es)/(pressure-es)) elif gssha_var == 'relative_humidity_dew': # https://software.ecmwf.int/wiki/display/CKB/Do+ERA+datasets+contain+parameters+for+near-surface+humidity # temperature in Kelvin # RH = 100 * es(Td)/es(T) dew_point_temp_var, temperature_var = lsm_var dew_point_temp = self._load_lsm_data(dew_point_temp_var) temperature = self._load_lsm_data(temperature_var) self.data = 100 * esat(dew_point_temp)/esat(temperature) elif gssha_var == 'wind_speed' and not isinstance(lsm_var, str): # WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables u_vector_var, v_vector_var = lsm_var conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type] u_vector = self._load_lsm_data(u_vector_var, conversion_factor) v_vector = self._load_lsm_data(v_vector_var, conversion_factor) self.data = (xu.sqrt(u_vector**2 + v_vector**2)) elif 'precipitation' in gssha_var and not isinstance(lsm_var, str): # WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables rain_c_var, rain_nc_var = lsm_var conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type] rain_c = self._load_lsm_data(rain_c_var, conversion_factor) rain_nc = self._load_lsm_data(rain_nc_var, conversion_factor) self.data = rain_c + rain_nc else: self.data = self._load_lsm_data(lsm_var, self.netcdf_attributes[gssha_var]['conversion_factor'][load_type], self.netcdf_attributes[gssha_var].get('calc_4d_method'), self.netcdf_attributes[gssha_var].get('calc_4d_dim'), time_step=time_step) conversion_function = self.netcdf_attributes[gssha_var].get('conversion_function') if conversion_function: self.data.values = self.netcdf_attributes[gssha_var]['conversion_function'][load_type](self.data.values) if 'precipitation' in gssha_var: # NOTE: Precipitation is converted from mm/s to mm/hr # with the conversion factor when it is a rate. 
if 'units' in self.data.attrs: if self.data.attrs['units'] == 'm': # convert from m to mm self.data.values *= 1000 if load_type == 'ascii' or load_type == 'netcdf': # CONVERT TO INCREMENTAL if gssha_var == 'precipitation_acc': self.data.values = np.lib.pad(self.data.diff(self.lsm_time_dim).values, ((1, 0), (0, 0), (0, 0)), 'constant', constant_values=0) # CONVERT PRECIP TO RADAR (mm/hr) IN FILE if gssha_var == 'precipitation_inc' or gssha_var == 'precipitation_acc': # convert from mm to mm/hr time_step_hours = np.diff(self.xd[self.lsm_time_var].values)[0]/np.timedelta64(1, 'h') self.data.values /= time_step_hours # convert to dataset gssha_data_var_name = self.netcdf_attributes[gssha_var]['gssha_name'] self.data = self.data.to_dataset(name=gssha_data_var_name) self.data.rename( { self.lsm_lon_dim: 'x', self.lsm_lat_dim: 'y', self.lsm_lon_var: 'lon', self.lsm_lat_var: 'lat' }, inplace=True ) self.data.attrs = {'proj4': self.xd.lsm.projection.ExportToProj4()} self.data[gssha_data_var_name].attrs = { 'standard_name': self.netcdf_attributes[gssha_var]['standard_name'], 'long_name': self.netcdf_attributes[gssha_var]['long_name'], 'units': self.netcdf_attributes[gssha_var]['units'][load_type], }
This function loads data from LSM and converts to GSSHA format
Below is the the instruction that describes the task: ### Input: This function loads data from LSM and converts to GSSHA format ### Response: def _load_converted_gssha_data_from_lsm(self, gssha_var, lsm_var, load_type, time_step=None): """ This function loads data from LSM and converts to GSSHA format """ if 'radiation' in gssha_var: conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type] if gssha_var.startswith('direct_radiation') and not isinstance(lsm_var, basestring): # direct_radiation = (1-DIFFUSIVE_FRACION)*global_radiation global_radiation_var, diffusive_fraction_var = lsm_var global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor) diffusive_fraction = self._load_lsm_data(diffusive_fraction_var) if gssha_var.endswith("cc"): diffusive_fraction /= 100.0 self.data = ((1-diffusive_fraction)*global_radiation) elif gssha_var.startswith('diffusive_radiation') and not isinstance(lsm_var, basestring): # diffusive_radiation = DIFFUSIVE_FRACION*global_radiation global_radiation_var, diffusive_fraction_var = lsm_var global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor) diffusive_fraction = self._load_lsm_data(diffusive_fraction_var) if gssha_var.endswith("cc"): diffusive_fraction /= 100 self.data = (diffusive_fraction*global_radiation) elif isinstance(lsm_var, basestring): self.data = self._load_lsm_data(lsm_var, self.netcdf_attributes[gssha_var]['conversion_factor'][load_type]) else: raise ValueError("Invalid LSM variable ({0}) for GSSHA variable {1}".format(lsm_var, gssha_var)) elif gssha_var == 'relative_humidity' and not isinstance(lsm_var, str): ##CONVERSION ASSUMPTIONS: ##1) These equations are for liquid water and are less accurate below 0 deg C ##2) Not adjusting the pressure for the fact that the temperature ## and moisture measurements are given at 2 m AGL. ##Neither of these should have a significant impact on RH values ##given the uncertainty in the model values themselves. 
specific_humidity_var, pressure_var, temperature_var = lsm_var specific_humidity = self._load_lsm_data(specific_humidity_var) pressure = self._load_lsm_data(pressure_var) temperature = self._load_lsm_data(temperature_var) ##To compute the relative humidity at 2m, ##given T, Q (water vapor mixing ratio) at 2 m and PSFC (surface pressure): ##Es(saturation vapor pressure in Pa) ##Qs(saturation mixing ratio)=(0.622*es)/(PSFC-es) ##RH = 100*Q/Qs es = esat(temperature) self.data = 100 * specific_humidity/((0.622*es)/(pressure-es)) elif gssha_var == 'relative_humidity_dew': # https://software.ecmwf.int/wiki/display/CKB/Do+ERA+datasets+contain+parameters+for+near-surface+humidity # temperature in Kelvin # RH = 100 * es(Td)/es(T) dew_point_temp_var, temperature_var = lsm_var dew_point_temp = self._load_lsm_data(dew_point_temp_var) temperature = self._load_lsm_data(temperature_var) self.data = 100 * esat(dew_point_temp)/esat(temperature) elif gssha_var == 'wind_speed' and not isinstance(lsm_var, str): # WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables u_vector_var, v_vector_var = lsm_var conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type] u_vector = self._load_lsm_data(u_vector_var, conversion_factor) v_vector = self._load_lsm_data(v_vector_var, conversion_factor) self.data = (xu.sqrt(u_vector**2 + v_vector**2)) elif 'precipitation' in gssha_var and not isinstance(lsm_var, str): # WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables rain_c_var, rain_nc_var = lsm_var conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type] rain_c = self._load_lsm_data(rain_c_var, conversion_factor) rain_nc = self._load_lsm_data(rain_nc_var, conversion_factor) self.data = rain_c + rain_nc else: self.data = self._load_lsm_data(lsm_var, self.netcdf_attributes[gssha_var]['conversion_factor'][load_type], self.netcdf_attributes[gssha_var].get('calc_4d_method'), self.netcdf_attributes[gssha_var].get('calc_4d_dim'), time_step=time_step) conversion_function = self.netcdf_attributes[gssha_var].get('conversion_function') if conversion_function: self.data.values = self.netcdf_attributes[gssha_var]['conversion_function'][load_type](self.data.values) if 'precipitation' in gssha_var: # NOTE: Precipitation is converted from mm/s to mm/hr # with the conversion factor when it is a rate. 
if 'units' in self.data.attrs: if self.data.attrs['units'] == 'm': # convert from m to mm self.data.values *= 1000 if load_type == 'ascii' or load_type == 'netcdf': # CONVERT TO INCREMENTAL if gssha_var == 'precipitation_acc': self.data.values = np.lib.pad(self.data.diff(self.lsm_time_dim).values, ((1, 0), (0, 0), (0, 0)), 'constant', constant_values=0) # CONVERT PRECIP TO RADAR (mm/hr) IN FILE if gssha_var == 'precipitation_inc' or gssha_var == 'precipitation_acc': # convert from mm to mm/hr time_step_hours = np.diff(self.xd[self.lsm_time_var].values)[0]/np.timedelta64(1, 'h') self.data.values /= time_step_hours # convert to dataset gssha_data_var_name = self.netcdf_attributes[gssha_var]['gssha_name'] self.data = self.data.to_dataset(name=gssha_data_var_name) self.data.rename( { self.lsm_lon_dim: 'x', self.lsm_lat_dim: 'y', self.lsm_lon_var: 'lon', self.lsm_lat_var: 'lat' }, inplace=True ) self.data.attrs = {'proj4': self.xd.lsm.projection.ExportToProj4()} self.data[gssha_data_var_name].attrs = { 'standard_name': self.netcdf_attributes[gssha_var]['standard_name'], 'long_name': self.netcdf_attributes[gssha_var]['long_name'], 'units': self.netcdf_attributes[gssha_var]['units'][load_type], }
def _get_parselypage(self, body):
    """extract the parsely-page meta content from a page"""
    parser = ParselyPageParser()
    ret = None
    try:
        parser.feed(body)
    except HTMLParseError:
        pass  # ignore and hope we got ppage

    if parser.ppage is None:
        return

    ret = parser.ppage
    if ret:
        ret = {parser.original_unescape(k): parser.original_unescape(v)
               for k, v in iteritems(ret)}
    return ret
extract the parsely-page meta content from a page
Below is the the instruction that describes the task: ### Input: extract the parsely-page meta content from a page ### Response: def _get_parselypage(self, body): """extract the parsely-page meta content from a page""" parser = ParselyPageParser() ret = None try: parser.feed(body) except HTMLParseError: pass # ignore and hope we got ppage if parser.ppage is None: return ret = parser.ppage if ret: ret = {parser.original_unescape(k): parser.original_unescape(v) for k, v in iteritems(ret)} return ret
def message_received(self, message):
    """Message was received from device."""
    # If the message identifier is outstanding, then someone is
    # waiting for the response so we save it here
    identifier = message.identifier or 'type_' + str(message.type)
    if identifier in self._outstanding:
        outstanding = OutstandingMessage(
            self._outstanding[identifier].semaphore, message)
        self._outstanding[identifier] = outstanding
        self._outstanding[identifier].semaphore.release()
    else:
        asyncio.ensure_future(self._dispatch(message), loop=self.loop)
Message was received from device.
Below is the the instruction that describes the task: ### Input: Message was received from device. ### Response: def message_received(self, message): """Message was received from device.""" # If the message identifer is outstanding, then someone is # waiting for the respone so we save it here identifier = message.identifier or 'type_' + str(message.type) if identifier in self._outstanding: outstanding = OutstandingMessage( self._outstanding[identifier].semaphore, message) self._outstanding[identifier] = outstanding self._outstanding[identifier].semaphore.release() else: asyncio.ensure_future(self._dispatch(message), loop=self.loop)
def check_roles(self, account, aws_policies, aws_roles): """Iterate through the roles of a specific account and create or update the roles if they're missing or does not match the roles from Git. Args: account (:obj:`Account`): The account to check roles on aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific account aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account Returns: `None` """ self.log.debug('Checking roles for {}'.format(account.account_name)) max_session_duration = self.dbconfig.get('role_timeout_in_hours', self.ns, 8) * 60 * 60 sess = get_aws_session(account) iam = sess.client('iam') # Build a list of default role policies and extra account specific role policies account_roles = copy.deepcopy(self.cfg_roles) if account.account_name in self.git_policies: for role in self.git_policies[account.account_name]: if role in account_roles: account_roles[role]['policies'] += list(self.git_policies[account.account_name][role].keys()) for role_name, data in list(account_roles.items()): if role_name not in aws_roles: iam.create_role( Path='/', RoleName=role_name, AssumeRolePolicyDocument=json.dumps(data['trust'], indent=4), MaxSessionDuration=max_session_duration ) self.log.info('Created role {}/{}'.format(account.account_name, role_name)) else: try: if aws_roles[role_name]['MaxSessionDuration'] != max_session_duration: iam.update_role( RoleName=aws_roles[role_name]['RoleName'], MaxSessionDuration=max_session_duration ) self.log.info('Adjusted MaxSessionDuration for role {} in account {} to {} seconds'.format( role_name, account.account_name, max_session_duration )) except ClientError: self.log.exception('Unable to adjust MaxSessionDuration for role {} in account {}'.format( role_name, account.account_name )) aws_role_policies = [x['PolicyName'] for x in iam.list_attached_role_policies( RoleName=role_name)['AttachedPolicies'] ] aws_role_inline_policies = iam.list_role_policies(RoleName=role_name)['PolicyNames'] cfg_role_policies = data['policies'] missing_policies = list(set(cfg_role_policies) - set(aws_role_policies)) extra_policies = list(set(aws_role_policies) - set(cfg_role_policies)) if aws_role_inline_policies: self.log.info('IAM Role {} on {} has the following inline policies: {}'.format( role_name, account.account_name, ', '.join(aws_role_inline_policies) )) if self.dbconfig.get('delete_inline_policies', self.ns, False) and self.manage_roles: for policy in aws_role_inline_policies: iam.delete_role_policy(RoleName=role_name, PolicyName=policy) auditlog( event='iam.check_roles.delete_inline_role_policy', actor=self.ns, data={ 'account': account.account_name, 'roleName': role_name, 'policy': policy } ) if missing_policies: self.log.info('IAM Role {} on {} is missing the following policies: {}'.format( role_name, account.account_name, ', '.join(missing_policies) )) if self.manage_roles: for policy in missing_policies: iam.attach_role_policy(RoleName=role_name, PolicyArn=aws_policies[policy]['Arn']) auditlog( event='iam.check_roles.attach_role_policy', actor=self.ns, data={ 'account': account.account_name, 'roleName': role_name, 'policyArn': aws_policies[policy]['Arn'] } ) if extra_policies: self.log.info('IAM Role {} on {} has the following extra policies applied: {}'.format( role_name, account.account_name, ', '.join(extra_policies) )) for policy in extra_policies: if policy in aws_policies: polArn = aws_policies[policy]['Arn'] elif policy in self.aws_managed_policies: polArn = 
self.aws_managed_policies[policy]['Arn'] else: polArn = None self.log.info('IAM Role {} on {} has an unknown policy attached: {}'.format( role_name, account.account_name, policy )) if self.manage_roles and polArn: iam.detach_role_policy(RoleName=role_name, PolicyArn=polArn) auditlog( event='iam.check_roles.detach_role_policy', actor=self.ns, data={ 'account': account.account_name, 'roleName': role_name, 'policyArn': polArn } )
Iterate through the roles of a specific account and create or update the roles if they're missing or does not match the roles from Git. Args: account (:obj:`Account`): The account to check roles on aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific account aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account Returns: `None`
Below is the the instruction that describes the task: ### Input: Iterate through the roles of a specific account and create or update the roles if they're missing or does not match the roles from Git. Args: account (:obj:`Account`): The account to check roles on aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific account aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account Returns: `None` ### Response: def check_roles(self, account, aws_policies, aws_roles): """Iterate through the roles of a specific account and create or update the roles if they're missing or does not match the roles from Git. Args: account (:obj:`Account`): The account to check roles on aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific account aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account Returns: `None` """ self.log.debug('Checking roles for {}'.format(account.account_name)) max_session_duration = self.dbconfig.get('role_timeout_in_hours', self.ns, 8) * 60 * 60 sess = get_aws_session(account) iam = sess.client('iam') # Build a list of default role policies and extra account specific role policies account_roles = copy.deepcopy(self.cfg_roles) if account.account_name in self.git_policies: for role in self.git_policies[account.account_name]: if role in account_roles: account_roles[role]['policies'] += list(self.git_policies[account.account_name][role].keys()) for role_name, data in list(account_roles.items()): if role_name not in aws_roles: iam.create_role( Path='/', RoleName=role_name, AssumeRolePolicyDocument=json.dumps(data['trust'], indent=4), MaxSessionDuration=max_session_duration ) self.log.info('Created role {}/{}'.format(account.account_name, role_name)) else: try: if aws_roles[role_name]['MaxSessionDuration'] != max_session_duration: iam.update_role( RoleName=aws_roles[role_name]['RoleName'], MaxSessionDuration=max_session_duration ) self.log.info('Adjusted MaxSessionDuration for role {} in account {} to {} seconds'.format( role_name, account.account_name, max_session_duration )) except ClientError: self.log.exception('Unable to adjust MaxSessionDuration for role {} in account {}'.format( role_name, account.account_name )) aws_role_policies = [x['PolicyName'] for x in iam.list_attached_role_policies( RoleName=role_name)['AttachedPolicies'] ] aws_role_inline_policies = iam.list_role_policies(RoleName=role_name)['PolicyNames'] cfg_role_policies = data['policies'] missing_policies = list(set(cfg_role_policies) - set(aws_role_policies)) extra_policies = list(set(aws_role_policies) - set(cfg_role_policies)) if aws_role_inline_policies: self.log.info('IAM Role {} on {} has the following inline policies: {}'.format( role_name, account.account_name, ', '.join(aws_role_inline_policies) )) if self.dbconfig.get('delete_inline_policies', self.ns, False) and self.manage_roles: for policy in aws_role_inline_policies: iam.delete_role_policy(RoleName=role_name, PolicyName=policy) auditlog( event='iam.check_roles.delete_inline_role_policy', actor=self.ns, data={ 'account': account.account_name, 'roleName': role_name, 'policy': policy } ) if missing_policies: self.log.info('IAM Role {} on {} is missing the following policies: {}'.format( role_name, account.account_name, ', '.join(missing_policies) )) if self.manage_roles: for policy in missing_policies: iam.attach_role_policy(RoleName=role_name, 
PolicyArn=aws_policies[policy]['Arn']) auditlog( event='iam.check_roles.attach_role_policy', actor=self.ns, data={ 'account': account.account_name, 'roleName': role_name, 'policyArn': aws_policies[policy]['Arn'] } ) if extra_policies: self.log.info('IAM Role {} on {} has the following extra policies applied: {}'.format( role_name, account.account_name, ', '.join(extra_policies) )) for policy in extra_policies: if policy in aws_policies: polArn = aws_policies[policy]['Arn'] elif policy in self.aws_managed_policies: polArn = self.aws_managed_policies[policy]['Arn'] else: polArn = None self.log.info('IAM Role {} on {} has an unknown policy attached: {}'.format( role_name, account.account_name, policy )) if self.manage_roles and polArn: iam.detach_role_policy(RoleName=role_name, PolicyArn=polArn) auditlog( event='iam.check_roles.detach_role_policy', actor=self.ns, data={ 'account': account.account_name, 'roleName': role_name, 'policyArn': polArn } )
def process_image(self, image, image_format, save_kwargs={}):
    """Return a BytesIO instance of `image` with inverted colors."""
    imagefile = BytesIO()
    inv_image = ImageOps.invert(image)
    inv_image.save(
        imagefile,
        **save_kwargs
    )
    return imagefile
Return a BytesIO instance of `image` with inverted colors.
Below is the the instruction that describes the task: ### Input: Return a BytesIO instance of `image` with inverted colors. ### Response: def process_image(self, image, image_format, save_kwargs={}): """Return a BytesIO instance of `image` with inverted colors.""" imagefile = BytesIO() inv_image = ImageOps.invert(image) inv_image.save( imagefile, **save_kwargs ) return imagefile
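A standalone run of the same idea with Pillow, using a tiny generated image rather than a real upload:

```python
from io import BytesIO
from PIL import Image, ImageOps

image = Image.new("RGB", (2, 2), color=(255, 0, 0))   # an all-red 2x2 image
buffer = BytesIO()
ImageOps.invert(image).save(buffer, format="PNG")

buffer.seek(0)
print(Image.open(buffer).getpixel((0, 0)))            # (0, 255, 255): red becomes cyan
```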
def jd2dt(jd):
    """Convert julian date to datetime
    """
    n = int(round(float(jd)))
    a = n + 32044
    b = (4*a + 3)//146097
    c = a - (146097*b)//4
    d = (4*c + 3)//1461
    e = c - (1461*d)//4
    m = (5*e + 2)//153
    day = e + 1 - (153*m + 2)//5
    month = m + 3 - 12*(m//10)
    year = 100*b + d - 4800 + m/10
    tfrac = 0.5 + float(jd) - n
    tfrac_s = 86400.0 * tfrac
    minfrac, hours = np.modf(tfrac_s / 3600.)
    secfrac, minutes = np.modf(minfrac * 60.)
    microsec, seconds = np.modf(secfrac * 60.)
    return datetime(year, month, day, int(hours), int(minutes),
                    int(seconds), int(microsec*1E6))
Convert julian date to datetime
Below is the the instruction that describes the task: ### Input: Convert julian date to datetime ### Response: def jd2dt(jd): """Convert julian date to datetime """ n = int(round(float(jd))) a = n + 32044 b = (4*a + 3)//146097 c = a - (146097*b)//4 d = (4*c + 3)//1461 e = c - (1461*d)//4 m = (5*e + 2)//153 day = e + 1 - (153*m + 2)//5 month = m + 3 - 12*(m//10) year = 100*b + d - 4800 + m/10 tfrac = 0.5 + float(jd) - n tfrac_s = 86400.0 * tfrac minfrac, hours = np.modf(tfrac_s / 3600.) secfrac, minutes = np.modf(minfrac * 60.) microsec, seconds = np.modf(secfrac * 60.) return datetime(year, month, day, int(hours), int(minutes), int(seconds), int(microsec*1E6))
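A quick cross-check of the conversion against the Unix-epoch anchor (JD 2440587.5 corresponds to 1970-01-01 00:00 UTC), done with plain datetime arithmetic instead of the calendar algorithm above:

```python
from datetime import datetime, timedelta

def jd_to_datetime(jd: float) -> datetime:
    # JD 2440587.5 is midnight at the start of 1970-01-01 (the Unix epoch)
    return datetime(1970, 1, 1) + timedelta(days=jd - 2440587.5)

print(jd_to_datetime(2451545.0))   # 2000-01-01 12:00:00 (the J2000.0 epoch)
```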
def register_regex_entity(self, regex_str, domain=0):
    """
    A regular expression making use of python named group expressions.

    Example: (?P<Artist>.*)

    Args:
        regex_str(str): a string representing a regular expression as defined above
        domain(str): a string representing the domain you wish to add the entity to
    """
    if domain not in self.domains:
        self.register_domain(domain=domain)
    self.domains[domain].register_regex_entity(regex_str=regex_str)
A regular expression making use of python named group expressions. Example: (?P<Artist>.*) Args: regex_str(str): a string representing a regular expression as defined above domain(str): a string representing the domain you wish to add the entity to
Below is the the instruction that describes the task: ### Input: A regular expression making use of python named group expressions. Example: (?P<Artist>.*) Args: regex_str(str): a string representing a regular expression as defined above domain(str): a string representing the domain you wish to add the entity to ### Response: def register_regex_entity(self, regex_str, domain=0): """ A regular expression making use of python named group expressions. Example: (?P<Artist>.*) Args: regex_str(str): a string representing a regular expression as defined above domain(str): a string representing the domain you wish to add the entity to """ if domain not in self.domains: self.register_domain(domain=domain) self.domains[domain].register_regex_entity(regex_str=regex_str)
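The named-group convention the docstring refers to is plain Python `re` syntax; the group name becomes the entity label. For example:

```python
import re

match = re.match(r"play (?P<Artist>.*)", "play the beatles")
print(match.group("Artist"))   # 'the beatles'
```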
def find_DST():
    """Find where this package should be installed to.
    """
    if SYS_NAME == "Windows":
        return os.path.join(site.getsitepackages()[1], PKG_NAME)
    elif SYS_NAME in ["Darwin", "Linux"]:
        return os.path.join(site.getsitepackages()[0], PKG_NAME)
Find where this package should be installed to.
Below is the the instruction that describes the task: ### Input: Find where this package should be installed to. ### Response: def find_DST(): """Find where this package should be installed to. """ if SYS_NAME == "Windows": return os.path.join(site.getsitepackages()[1], PKG_NAME) elif SYS_NAME in ["Darwin", "Linux"]: return os.path.join(site.getsitepackages()[0], PKG_NAME)
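Roughly what the helper resolves to, assuming `PKG_NAME` is the package's import name (the value below is a placeholder) and `SYS_NAME` comes from `platform.system()`:

```python
import os
import platform
import site

PKG_NAME = "my_package"            # placeholder for the module-level constant
SYS_NAME = platform.system()       # "Windows", "Darwin" or "Linux"

if SYS_NAME == "Windows":
    dst = os.path.join(site.getsitepackages()[1], PKG_NAME)
else:
    dst = os.path.join(site.getsitepackages()[0], PKG_NAME)

print(dst)   # e.g. /usr/lib/python3.11/site-packages/my_package
```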
def artist_update(self, artist_id, name=None, other_names_comma=None,
                  group_name=None, url_string=None, body=None):
    """Function to update artists (Requires login) (UNTESTED).

    Parameters:
        artist_id (str):
        name (str): Artist name.
        other_names_comma (str): List of alternative names for this artist, comma delimited.
        group_name (str): The name of the group this artist belongs to.
        url_string (str): List of URLs associated with this artist, whitespace or newline delimited.
        body (str): DText that will be used to create/update a wiki entry at the same time.
    """
    params = {
        'artist[name]': name,
        'artist[other_names_comma]': other_names_comma,
        'artist[group_name]': group_name,
        'artist[url_string]': url_string,
        'artist[body]': body
        }
    return self.get('artists/{0}.json'.format(artist_id), params,
                    method='PUT', auth=True)
Function to update artists (Requires login) (UNTESTED). Parameters: artist_id (str): name (str): Artist name. other_names_comma (str): List of alternative names for this artist, comma delimited. group_name (str): The name of the group this artist belongs to. url_string (str): List of URLs associated with this artist, whitespace or newline delimited. body (str): DText that will be used to create/update a wiki entry at the same time.
Below is the the instruction that describes the task: ### Input: Function to update artists (Requires login) (UNTESTED). Parameters: artist_id (str): name (str): Artist name. other_names_comma (str): List of alternative names for this artist, comma delimited. group_name (str): The name of the group this artist belongs to. url_string (str): List of URLs associated with this artist, whitespace or newline delimited. body (str): DText that will be used to create/update a wiki entry at the same time. ### Response: def artist_update(self, artist_id, name=None, other_names_comma=None, group_name=None, url_string=None, body=None): """Function to update artists (Requires login) (UNTESTED). Parameters: artist_id (str): name (str): Artist name. other_names_comma (str): List of alternative names for this artist, comma delimited. group_name (str): The name of the group this artist belongs to. url_string (str): List of URLs associated with this artist, whitespace or newline delimited. body (str): DText that will be used to create/update a wiki entry at the same time. """ params = { 'artist[name]': name, 'artist[other_names_comma]': other_names_comma, 'artist[group_name]': group_name, 'artist[url_string]': url_string, 'artist[body]': body } return self .get('artists/{0}.json'.format(artist_id), params, method='PUT', auth=True)
def get_attachment(self, ticket_id, attachment_id): """ Get attachment. :param ticket_id: ID of ticket :param attachment_id: ID of attachment for obtain :returns: Attachment as dictionary with these keys: * Transaction * ContentType * Parent * Creator * Created * Filename * Content (bytes type) * Headers * MessageId * ContentEncoding * id * Subject All these fields are strings, just 'Headers' holds another dictionary with attachment headers as strings e.g.: * Delivered-To * From * Return-Path * Content-Length * To * X-Seznam-User * X-QM-Mark * Domainkey-Signature * RT-Message-ID * X-RT-Incoming-Encryption * X-Original-To * Message-ID * X-Spam-Status * In-Reply-To * Date * Received * X-Country * X-Spam-Checker-Version * X-Abuse * MIME-Version * Content-Type * Subject .. warning:: Content-Length parameter is set after opening ticket in web interface! Set of headers available depends on mailservers sending emails not on Request Tracker! Returns None if ticket or attachment does not exist. :raises UnexpectedMessageFormat: Unexpected format of returned message. """ msg = self.__request('ticket/{}/attachments/{}'.format(str(ticket_id), str(attachment_id)), text_response=False) msg = msg.split(b'\n') if (len(msg) > 2) and (self.RE_PATTERNS['invalid_attachment_pattern_bytes'].match(msg[2]) or self.RE_PATTERNS['does_not_exist_pattern_bytes'].match(msg[2])): return None msg = msg[2:] head_matching = [i for i, m in enumerate(msg) if self.RE_PATTERNS['headers_pattern_bytes'].match(m)] head_id = head_matching[0] if head_matching else None if not head_id: raise UnexpectedMessageFormat('Unexpected headers part of attachment entry. \ Missing line starting with `Headers:`.') msg[head_id] = re.sub(b'^Headers: (.*)$', r'\1', msg[head_id]) cont_matching = [i for i, m in enumerate(msg) if self.RE_PATTERNS['content_pattern_bytes'].match(m)] cont_id = cont_matching[0] if cont_matching else None if not cont_matching: raise UnexpectedMessageFormat('Unexpected content part of attachment entry. \ Missing line starting with `Content:`.') pairs = {} for i in range(head_id): if b': ' in msg[i]: header, content = msg[i].split(b': ', 1) pairs[header.strip().decode('utf-8')] = content.strip().decode('utf-8') headers = {} for i in range(head_id, cont_id): if b': ' in msg[i]: header, content = msg[i].split(b': ', 1) headers[header.strip().decode('utf-8')] = content.strip().decode('utf-8') pairs['Headers'] = headers content = msg[cont_id][9:] for i in range(cont_id + 1, len(msg)): if msg[i][:9] == (b' ' * 9): content += b'\n' + msg[i][9:] pairs['Content'] = content return pairs
Get attachment. :param ticket_id: ID of ticket :param attachment_id: ID of attachment for obtain :returns: Attachment as dictionary with these keys: * Transaction * ContentType * Parent * Creator * Created * Filename * Content (bytes type) * Headers * MessageId * ContentEncoding * id * Subject All these fields are strings, just 'Headers' holds another dictionary with attachment headers as strings e.g.: * Delivered-To * From * Return-Path * Content-Length * To * X-Seznam-User * X-QM-Mark * Domainkey-Signature * RT-Message-ID * X-RT-Incoming-Encryption * X-Original-To * Message-ID * X-Spam-Status * In-Reply-To * Date * Received * X-Country * X-Spam-Checker-Version * X-Abuse * MIME-Version * Content-Type * Subject .. warning:: Content-Length parameter is set after opening ticket in web interface! Set of headers available depends on mailservers sending emails not on Request Tracker! Returns None if ticket or attachment does not exist. :raises UnexpectedMessageFormat: Unexpected format of returned message.
Below is the the instruction that describes the task: ### Input: Get attachment. :param ticket_id: ID of ticket :param attachment_id: ID of attachment for obtain :returns: Attachment as dictionary with these keys: * Transaction * ContentType * Parent * Creator * Created * Filename * Content (bytes type) * Headers * MessageId * ContentEncoding * id * Subject All these fields are strings, just 'Headers' holds another dictionary with attachment headers as strings e.g.: * Delivered-To * From * Return-Path * Content-Length * To * X-Seznam-User * X-QM-Mark * Domainkey-Signature * RT-Message-ID * X-RT-Incoming-Encryption * X-Original-To * Message-ID * X-Spam-Status * In-Reply-To * Date * Received * X-Country * X-Spam-Checker-Version * X-Abuse * MIME-Version * Content-Type * Subject .. warning:: Content-Length parameter is set after opening ticket in web interface! Set of headers available depends on mailservers sending emails not on Request Tracker! Returns None if ticket or attachment does not exist. :raises UnexpectedMessageFormat: Unexpected format of returned message. ### Response: def get_attachment(self, ticket_id, attachment_id): """ Get attachment. :param ticket_id: ID of ticket :param attachment_id: ID of attachment for obtain :returns: Attachment as dictionary with these keys: * Transaction * ContentType * Parent * Creator * Created * Filename * Content (bytes type) * Headers * MessageId * ContentEncoding * id * Subject All these fields are strings, just 'Headers' holds another dictionary with attachment headers as strings e.g.: * Delivered-To * From * Return-Path * Content-Length * To * X-Seznam-User * X-QM-Mark * Domainkey-Signature * RT-Message-ID * X-RT-Incoming-Encryption * X-Original-To * Message-ID * X-Spam-Status * In-Reply-To * Date * Received * X-Country * X-Spam-Checker-Version * X-Abuse * MIME-Version * Content-Type * Subject .. warning:: Content-Length parameter is set after opening ticket in web interface! Set of headers available depends on mailservers sending emails not on Request Tracker! Returns None if ticket or attachment does not exist. :raises UnexpectedMessageFormat: Unexpected format of returned message. """ msg = self.__request('ticket/{}/attachments/{}'.format(str(ticket_id), str(attachment_id)), text_response=False) msg = msg.split(b'\n') if (len(msg) > 2) and (self.RE_PATTERNS['invalid_attachment_pattern_bytes'].match(msg[2]) or self.RE_PATTERNS['does_not_exist_pattern_bytes'].match(msg[2])): return None msg = msg[2:] head_matching = [i for i, m in enumerate(msg) if self.RE_PATTERNS['headers_pattern_bytes'].match(m)] head_id = head_matching[0] if head_matching else None if not head_id: raise UnexpectedMessageFormat('Unexpected headers part of attachment entry. \ Missing line starting with `Headers:`.') msg[head_id] = re.sub(b'^Headers: (.*)$', r'\1', msg[head_id]) cont_matching = [i for i, m in enumerate(msg) if self.RE_PATTERNS['content_pattern_bytes'].match(m)] cont_id = cont_matching[0] if cont_matching else None if not cont_matching: raise UnexpectedMessageFormat('Unexpected content part of attachment entry. 
\ Missing line starting with `Content:`.') pairs = {} for i in range(head_id): if b': ' in msg[i]: header, content = msg[i].split(b': ', 1) pairs[header.strip().decode('utf-8')] = content.strip().decode('utf-8') headers = {} for i in range(head_id, cont_id): if b': ' in msg[i]: header, content = msg[i].split(b': ', 1) headers[header.strip().decode('utf-8')] = content.strip().decode('utf-8') pairs['Headers'] = headers content = msg[cont_id][9:] for i in range(cont_id + 1, len(msg)): if msg[i][:9] == (b' ' * 9): content += b'\n' + msg[i][9:] pairs['Content'] = content return pairs
def load():
    # type: () -> None
    """ Load configuration from file.

    This will search the directory structure upwards to find the project root
    (directory containing ``pelconf.py`` file). Once found it will import the
    config file which should initialize all the configuration (using
    `peltak.core.conf.init()` function).

    You can also have both yaml (configuration) and python (custom commands)
    living together. Just remember that calling `conf.init()` will overwrite
    the config defined in YAML.
    """
    with within_proj_dir():
        if os.path.exists('pelconf.yaml'):
            load_yaml_config('pelconf.yaml')

        if os.path.exists('pelconf.py'):
            load_py_config('pelconf.py')
Load configuration from file. This will search the directory structure upwards to find the project root (directory containing ``pelconf.py`` file). Once found it will import the config file which should initialize all the configuration (using `peltak.core.conf.init()` function). You can also have both yaml (configuration) and python (custom commands) living together. Just remember that calling `conf.init()` will overwrite the config defined in YAML.
Below is the the instruction that describes the task: ### Input: Load configuration from file. This will search the directory structure upwards to find the project root (directory containing ``pelconf.py`` file). Once found it will import the config file which should initialize all the configuration (using `peltak.core.conf.init()` function). You can also have both yaml (configuration) and python (custom commands) living together. Just remember that calling `conf.init()` will overwrite the config defined in YAML. ### Response: def load(): # type: () -> None """ Load configuration from file. This will search the directory structure upwards to find the project root (directory containing ``pelconf.py`` file). Once found it will import the config file which should initialize all the configuration (using `peltak.core.conf.init()` function). You can also have both yaml (configuration) and python (custom commands) living together. Just remember that calling `conf.init()` will overwrite the config defined in YAML. """ with within_proj_dir(): if os.path.exists('pelconf.yaml'): load_yaml_config('pelconf.yaml') if os.path.exists('pelconf.py'): load_py_config('pelconf.py')
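A minimal sketch of the "search upwards for the project root" behavior the docstring describes (the real logic lives in `within_proj_dir`; `pelconf.py` is used here as the marker file):

```python
from pathlib import Path

def find_proj_root(start: Path, marker: str = "pelconf.py"):
    # Walk from the starting directory up to the filesystem root
    for candidate in (start, *start.parents):
        if (candidate / marker).exists():
            return candidate
    return None

print(find_proj_root(Path.cwd()))   # None unless a pelconf.py exists somewhere above
```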
def photbw(self, floor=0):
    """Calculate :ref:`pysynphot-formula-bandw`.

    .. note::

        For backward-compatibility with IRAF STSDAS SYNPHOT only.

    Parameters
    ----------
    floor : float
        Same as :meth:`rmswidth`.

    Returns
    -------
    ans : float
        RMS band width (deprecated).

    """
    mywaveunits = self.waveunits.name
    self.convert('angstroms')

    wave = self.wave
    thru = self.throughput
    self.convert(mywaveunits)

    # calculate the average wavelength
    num = self.trapezoidIntegration(wave, thru * N.log(wave) / wave)
    den = self.trapezoidIntegration(wave, thru / wave)

    if num == 0 or den == 0:
        return 0.0

    avg_wave = N.exp(num/den)

    if floor != 0:
        idx = N.where(thru >= floor)
        wave = wave[idx]
        thru = thru[idx]

    # calculate the rms width
    integrand = thru * N.log(wave / avg_wave)**2 / wave
    num = self.trapezoidIntegration(wave, integrand)

    if num == 0 or den == 0:
        return 0.0

    return avg_wave * N.sqrt(num/den)
Calculate :ref:`pysynphot-formula-bandw`. .. note:: For backward-compatibility with IRAF STSDAS SYNPHOT only. Parameters ---------- floor : float Same as :meth:`rmswidth`. Returns ------- ans : float RMS band width (deprecated).
Below is the the instruction that describes the task: ### Input: Calculate :ref:`pysynphot-formula-bandw`. .. note:: For backward-compatibility with IRAF STSDAS SYNPHOT only. Parameters ---------- floor : float Same as :meth:`rmswidth`. Returns ------- ans : float RMS band width (deprecated). ### Response: def photbw(self, floor=0): """Calculate :ref:`pysynphot-formula-bandw`. .. note:: For backward-compatibility with IRAF STSDAS SYNPHOT only. Parameters ---------- floor : float Same as :meth:`rmswidth`. Returns ------- ans : float RMS band width (deprecated). """ mywaveunits = self.waveunits.name self.convert('angstroms') wave = self.wave thru = self.throughput self.convert(mywaveunits) # calculate the average wavelength num = self.trapezoidIntegration(wave, thru * N.log(wave) / wave) den = self.trapezoidIntegration(wave, thru / wave) if num == 0 or den == 0: return 0.0 avg_wave = N.exp(num/den) if floor != 0: idx = N.where(thru >= floor) wave = wave[idx] thru = thru[idx] # calcualte the rms width integrand = thru * N.log(wave / avg_wave)**2 / wave num = self.trapezoidIntegration(wave, integrand) if num == 0 or den == 0: return 0.0 return avg_wave * N.sqrt(num/den)
def potential_purviews(self, direction, mechanism, purviews=False): """Return all purviews that could belong to the |MIC|/|MIE|. Filters out trivially-reducible purviews. Args: direction (str): Either |CAUSE| or |EFFECT|. mechanism (tuple[int]): The mechanism of interest. Keyword Args: purviews (tuple[int]): Optional subset of purviews of interest. """ system = self.system[direction] return [ purview for purview in system.potential_purviews( direction, mechanism, purviews) if set(purview).issubset(self.purview_indices(direction)) ]
Return all purviews that could belong to the |MIC|/|MIE|. Filters out trivially-reducible purviews. Args: direction (str): Either |CAUSE| or |EFFECT|. mechanism (tuple[int]): The mechanism of interest. Keyword Args: purviews (tuple[int]): Optional subset of purviews of interest.
Below is the the instruction that describes the task: ### Input: Return all purviews that could belong to the |MIC|/|MIE|. Filters out trivially-reducible purviews. Args: direction (str): Either |CAUSE| or |EFFECT|. mechanism (tuple[int]): The mechanism of interest. Keyword Args: purviews (tuple[int]): Optional subset of purviews of interest. ### Response: def potential_purviews(self, direction, mechanism, purviews=False): """Return all purviews that could belong to the |MIC|/|MIE|. Filters out trivially-reducible purviews. Args: direction (str): Either |CAUSE| or |EFFECT|. mechanism (tuple[int]): The mechanism of interest. Keyword Args: purviews (tuple[int]): Optional subset of purviews of interest. """ system = self.system[direction] return [ purview for purview in system.potential_purviews( direction, mechanism, purviews) if set(purview).issubset(self.purview_indices(direction)) ]
def update_cache_settings(self, service_id, version_number, name_key, **kwargs): """Update a specific cache settings object.""" body = self._formdata(kwargs, FastlyCacheSettings.FIELDS) content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, name_key), method="PUT", body=body) return FastlyCacheSettings(self, content)
Update a specific cache settings object.
Below is the the instruction that describes the task: ### Input: Update a specific cache settings object. ### Response: def update_cache_settings(self, service_id, version_number, name_key, **kwargs): """Update a specific cache settings object.""" body = self._formdata(kwargs, FastlyCacheSettings.FIELDS) content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, name_key), method="PUT", body=body) return FastlyCacheSettings(self, content)
def user_present(name, uid, password, channel=14, callback=False, link_auth=True, ipmi_msg=True, privilege_level='administrator', **kwargs): ''' Ensure IPMI user and user privileges. name name of user (limit 16 bytes) uid user id number (1 to 7) password user password (limit 16 bytes) channel ipmi channel defaults to 14 for auto callback User Restricted to Callback False = User Privilege Limit is determined by the User Privilege Limit parameter privilege_level, for both callback and non-callback connections. True = User Privilege Limit is determined by the privilege_level parameter for callback connections, but is restricted to Callback level for non-callback connections. Thus, a user can only initiate a Callback when they 'call in' to the BMC, but once the callback connection has been made, the user could potentially establish a session as an Operator. link_auth User Link authentication True/False user name and password information will be used for link authentication, e.g. PPP CHAP) for the given channel. Link authentication itself is a global setting for the channel and is enabled/disabled via the serial/modem configuration parameters. ipmi_msg User IPMI Messaging True/False user name and password information will be used for IPMI Messaging. In this case, 'IPMI Messaging' refers to the ability to execute generic IPMI commands that are not associated with a particular payload type. For example, if IPMI Messaging is disabled for a user, but that user is enabled for activating the SOL payload type, then IPMI commands associated with SOL and session management, such as Get SOL Configuration Parameters and Close Session are available, but generic IPMI commands such as Get SEL Time are unavailable.) ipmi_msg privilege_level * callback * user * operator * administrator * proprietary * no_access kwargs - api_host=localhost - api_user=admin - api_pass= - api_port=623 - api_kg=None ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} org_user = __salt__['ipmi.get_user'](uid=uid, channel=channel, **kwargs) change = False if org_user['access']['callback'] != callback: change = True if org_user['access']['link_auth'] != link_auth: change = True if org_user['access']['ipmi_msg'] != ipmi_msg: change = True if org_user['access']['privilege_level'] != privilege_level: change = True if __salt__['ipmi.set_user_password'](uid, mode='test_password', password=password, **kwargs) is False: change = True if change is False: ret['result'] = True ret['comment'] = 'user already present' return ret if __opts__['test']: ret['comment'] = 'would (re)create user' ret['result'] = None ret['changes'] = {'old': org_user, 'new': name} return ret __salt__['ipmi.ensure_user'](uid, name, password, channel, callback, link_auth, ipmi_msg, privilege_level, **kwargs) current_user = __salt__['ipmi.get_user'](uid=uid, channel=channel, **kwargs) ret['comment'] = '(re)created user' ret['result'] = True ret['changes'] = {'old': org_user, 'new': current_user} return ret
Ensure IPMI user and user privileges. name name of user (limit 16 bytes) uid user id number (1 to 7) password user password (limit 16 bytes) channel ipmi channel defaults to 14 for auto callback User Restricted to Callback False = User Privilege Limit is determined by the User Privilege Limit parameter privilege_level, for both callback and non-callback connections. True = User Privilege Limit is determined by the privilege_level parameter for callback connections, but is restricted to Callback level for non-callback connections. Thus, a user can only initiate a Callback when they 'call in' to the BMC, but once the callback connection has been made, the user could potentially establish a session as an Operator. link_auth User Link authentication True/False user name and password information will be used for link authentication, e.g. PPP CHAP) for the given channel. Link authentication itself is a global setting for the channel and is enabled/disabled via the serial/modem configuration parameters. ipmi_msg User IPMI Messaging True/False user name and password information will be used for IPMI Messaging. In this case, 'IPMI Messaging' refers to the ability to execute generic IPMI commands that are not associated with a particular payload type. For example, if IPMI Messaging is disabled for a user, but that user is enabled for activating the SOL payload type, then IPMI commands associated with SOL and session management, such as Get SOL Configuration Parameters and Close Session are available, but generic IPMI commands such as Get SEL Time are unavailable.) ipmi_msg privilege_level * callback * user * operator * administrator * proprietary * no_access kwargs - api_host=localhost - api_user=admin - api_pass= - api_port=623 - api_kg=None
Below is the the instruction that describes the task: ### Input: Ensure IPMI user and user privileges. name name of user (limit 16 bytes) uid user id number (1 to 7) password user password (limit 16 bytes) channel ipmi channel defaults to 14 for auto callback User Restricted to Callback False = User Privilege Limit is determined by the User Privilege Limit parameter privilege_level, for both callback and non-callback connections. True = User Privilege Limit is determined by the privilege_level parameter for callback connections, but is restricted to Callback level for non-callback connections. Thus, a user can only initiate a Callback when they 'call in' to the BMC, but once the callback connection has been made, the user could potentially establish a session as an Operator. link_auth User Link authentication True/False user name and password information will be used for link authentication, e.g. PPP CHAP) for the given channel. Link authentication itself is a global setting for the channel and is enabled/disabled via the serial/modem configuration parameters. ipmi_msg User IPMI Messaging True/False user name and password information will be used for IPMI Messaging. In this case, 'IPMI Messaging' refers to the ability to execute generic IPMI commands that are not associated with a particular payload type. For example, if IPMI Messaging is disabled for a user, but that user is enabled for activating the SOL payload type, then IPMI commands associated with SOL and session management, such as Get SOL Configuration Parameters and Close Session are available, but generic IPMI commands such as Get SEL Time are unavailable.) ipmi_msg privilege_level * callback * user * operator * administrator * proprietary * no_access kwargs - api_host=localhost - api_user=admin - api_pass= - api_port=623 - api_kg=None ### Response: def user_present(name, uid, password, channel=14, callback=False, link_auth=True, ipmi_msg=True, privilege_level='administrator', **kwargs): ''' Ensure IPMI user and user privileges. name name of user (limit 16 bytes) uid user id number (1 to 7) password user password (limit 16 bytes) channel ipmi channel defaults to 14 for auto callback User Restricted to Callback False = User Privilege Limit is determined by the User Privilege Limit parameter privilege_level, for both callback and non-callback connections. True = User Privilege Limit is determined by the privilege_level parameter for callback connections, but is restricted to Callback level for non-callback connections. Thus, a user can only initiate a Callback when they 'call in' to the BMC, but once the callback connection has been made, the user could potentially establish a session as an Operator. link_auth User Link authentication True/False user name and password information will be used for link authentication, e.g. PPP CHAP) for the given channel. Link authentication itself is a global setting for the channel and is enabled/disabled via the serial/modem configuration parameters. ipmi_msg User IPMI Messaging True/False user name and password information will be used for IPMI Messaging. In this case, 'IPMI Messaging' refers to the ability to execute generic IPMI commands that are not associated with a particular payload type. 
For example, if IPMI Messaging is disabled for a user, but that user is enabled for activating the SOL payload type, then IPMI commands associated with SOL and session management, such as Get SOL Configuration Parameters and Close Session are available, but generic IPMI commands such as Get SEL Time are unavailable.) ipmi_msg privilege_level * callback * user * operator * administrator * proprietary * no_access kwargs - api_host=localhost - api_user=admin - api_pass= - api_port=623 - api_kg=None ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} org_user = __salt__['ipmi.get_user'](uid=uid, channel=channel, **kwargs) change = False if org_user['access']['callback'] != callback: change = True if org_user['access']['link_auth'] != link_auth: change = True if org_user['access']['ipmi_msg'] != ipmi_msg: change = True if org_user['access']['privilege_level'] != privilege_level: change = True if __salt__['ipmi.set_user_password'](uid, mode='test_password', password=password, **kwargs) is False: change = True if change is False: ret['result'] = True ret['comment'] = 'user already present' return ret if __opts__['test']: ret['comment'] = 'would (re)create user' ret['result'] = None ret['changes'] = {'old': org_user, 'new': name} return ret __salt__['ipmi.ensure_user'](uid, name, password, channel, callback, link_auth, ipmi_msg, privilege_level, **kwargs) current_user = __salt__['ipmi.get_user'](uid=uid, channel=channel, **kwargs) ret['comment'] = '(re)created user' ret['result'] = True ret['changes'] = {'old': org_user, 'new': current_user} return ret
def properties(self): # type: () -> list """ Returns: (list[str]) List of public properties """ _type = type(self) return [_property for _property in dir(_type) if self._is_property(_property)]
Returns: (list[str]) List of public properties
Below is the the instruction that describes the task: ### Input: Returns: (list[str]) List of public properties ### Response: def properties(self): # type: () -> list """ Returns: (list[str]) List of public properties """ _type = type(self) return [_property for _property in dir(_type) if self._is_property(_property)]
def qualified_note_rate(pianoroll, threshold=2): """Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a pianoroll.""" _validate_pianoroll(pianoroll) if np.issubdtype(pianoroll.dtype, np.bool_): pianoroll = pianoroll.astype(np.uint8) padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant') diff = np.diff(padded, axis=0).reshape(-1) onsets = (diff > 0).nonzero()[0] offsets = (diff < 0).nonzero()[0] n_qualified_notes = np.count_nonzero(offsets - onsets >= threshold) return n_qualified_notes / len(onsets)
Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a pianoroll.
Below is the the instruction that describes the task: ### Input: Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a pianoroll. ### Response: def qualified_note_rate(pianoroll, threshold=2): """Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a pianoroll.""" _validate_pianoroll(pianoroll) if np.issubdtype(pianoroll.dtype, np.bool_): pianoroll = pianoroll.astype(np.uint8) padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant') diff = np.diff(padded, axis=0).reshape(-1) onsets = (diff > 0).nonzero()[0] offsets = (diff < 0).nonzero()[0] n_qualified_notes = np.count_nonzero(offsets - onsets >= threshold) return n_qualified_notes / len(onsets)
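A tiny hand-checkable case makes the metric concrete. The sketch below builds a single-pitch roll with one three-step note and one one-step note, so with the default threshold of 2 the expected rate is 0.5; it redoes the onset/offset bookkeeping inline rather than calling the function above, since the module-level _validate_pianoroll helper is not shown here.

import numpy as np

# one pitch column, six time steps: a 3-step note followed by a 1-step note
roll = np.array([[1], [1], [1], [0], [1], [0]], dtype=np.uint8)

padded = np.pad(roll, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded, axis=0).reshape(-1)
onsets = (diff > 0).nonzero()[0]    # array([0, 4])
offsets = (diff < 0).nonzero()[0]   # array([3, 5])
durations = offsets - onsets        # array([3, 1])

rate = np.count_nonzero(durations >= 2) / len(onsets)
print(rate)  # 0.5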
def main (args): """Usage: print_sedml input-filename """ if len(args) != 2: print(main.__doc__) sys.exit(1) doc = libsedml.readSedML(args[1]); if ( doc.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR) > 0): print doc.getErrorLog().toString(); sys.exit(2); print 'The document has {0}" simulation(s).'.format(doc.getNumSimulations()); for i in range(0, doc.getNumSimulations()): current = doc.getSimulation(i); if (current.getTypeCode() == libsedml.SEDML_SIMULATION_UNIFORMTIMECOURSE): tc = current; kisaoid="none" if tc.isSetAlgorithm(): kisaoid=tc.getAlgorithm().getKisaoID() print "\tTimecourse id=", tc.getId()," start=",tc.getOutputStartTime()," end=",tc.getOutputEndTime()," numPoints=",tc.getNumberOfPoints()," kisao=",kisaoid,"\n"; else: print "\tUncountered unknown simulation. ",current.getId(),"\n"; print "\n" print "The document has ",doc.getNumModels() , " model(s)." , "\n"; for i in range(0,doc.getNumModels()): current = doc.getModel(i); print "\tModel id=" , current.getId() , " language=" , current.getLanguage() , " source=" , current.getSource() , " numChanges=" , current.getNumChanges() , "\n"; print "\n"; print "The document has " , doc.getNumTasks() , " task(s)." , "\n"; for i in range(0,doc.getNumTasks()): current = doc.getTask(i); print "\tTask id=" , current.getId() , " model=" , current.getModelReference() , " sim=" , current.getSimulationReference() , "\n"; print "\n"; print "The document has " , doc.getNumDataGenerators() , " datagenerators(s)." , "\n"; for i in range( 0, doc.getNumDataGenerators()): current = doc.getDataGenerator(i); print "\tDG id=" , current.getId() , " math=" , libsedml.formulaToString(current.getMath()) , "\n"; print "\n"; print "The document has " , doc.getNumOutputs() , " output(s)." , "\n"; for i in range (0, doc.getNumOutputs()): current = doc.getOutput(i); tc = current.getTypeCode(); if tc == libsedml.SEDML_OUTPUT_REPORT: r = (current); print "\tReport id=" , current.getId() , " numDataSets=" , r.getNumDataSets() , "\n"; elif tc == libsedml.SEDML_OUTPUT_PLOT2D: p = (current); print "\tPlot2d id=" , current.getId() , " numCurves=" , p.getNumCurves() , "\n"; elif tc == libsedml.SEDML_OUTPUT_PLOT3D: p = (current); print "\tPlot3d id=" , current.getId() , " numSurfaces=" , p.getNumSurfaces() , "\n"; else: print "\tEncountered unknown output " , current.getId() , "\n";
Usage: print_sedml input-filename
Below is the the instruction that describes the task: ### Input: Usage: print_sedml input-filename ### Response: def main (args): """Usage: print_sedml input-filename """ if len(args) != 2: print(main.__doc__) sys.exit(1) doc = libsedml.readSedML(args[1]); if ( doc.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR) > 0): print doc.getErrorLog().toString(); sys.exit(2); print 'The document has {0}" simulation(s).'.format(doc.getNumSimulations()); for i in range(0, doc.getNumSimulations()): current = doc.getSimulation(i); if (current.getTypeCode() == libsedml.SEDML_SIMULATION_UNIFORMTIMECOURSE): tc = current; kisaoid="none" if tc.isSetAlgorithm(): kisaoid=tc.getAlgorithm().getKisaoID() print "\tTimecourse id=", tc.getId()," start=",tc.getOutputStartTime()," end=",tc.getOutputEndTime()," numPoints=",tc.getNumberOfPoints()," kisao=",kisaoid,"\n"; else: print "\tUncountered unknown simulation. ",current.getId(),"\n"; print "\n" print "The document has ",doc.getNumModels() , " model(s)." , "\n"; for i in range(0,doc.getNumModels()): current = doc.getModel(i); print "\tModel id=" , current.getId() , " language=" , current.getLanguage() , " source=" , current.getSource() , " numChanges=" , current.getNumChanges() , "\n"; print "\n"; print "The document has " , doc.getNumTasks() , " task(s)." , "\n"; for i in range(0,doc.getNumTasks()): current = doc.getTask(i); print "\tTask id=" , current.getId() , " model=" , current.getModelReference() , " sim=" , current.getSimulationReference() , "\n"; print "\n"; print "The document has " , doc.getNumDataGenerators() , " datagenerators(s)." , "\n"; for i in range( 0, doc.getNumDataGenerators()): current = doc.getDataGenerator(i); print "\tDG id=" , current.getId() , " math=" , libsedml.formulaToString(current.getMath()) , "\n"; print "\n"; print "The document has " , doc.getNumOutputs() , " output(s)." , "\n"; for i in range (0, doc.getNumOutputs()): current = doc.getOutput(i); tc = current.getTypeCode(); if tc == libsedml.SEDML_OUTPUT_REPORT: r = (current); print "\tReport id=" , current.getId() , " numDataSets=" , r.getNumDataSets() , "\n"; elif tc == libsedml.SEDML_OUTPUT_PLOT2D: p = (current); print "\tPlot2d id=" , current.getId() , " numCurves=" , p.getNumCurves() , "\n"; elif tc == libsedml.SEDML_OUTPUT_PLOT3D: p = (current); print "\tPlot3d id=" , current.getId() , " numSurfaces=" , p.getNumSurfaces() , "\n"; else: print "\tEncountered unknown output " , current.getId() , "\n";
def convert(self, values, nan_rep, encoding, errors): """ set the values from this selection: take = take ownership """ # values is a recarray if values.dtype.fields is not None: values = values[self.cname] values = _maybe_convert(values, self.kind, encoding, errors) kwargs = dict() if self.freq is not None: kwargs['freq'] = _ensure_decoded(self.freq) if self.index_name is not None: kwargs['name'] = _ensure_decoded(self.index_name) # making an Index instance could throw a number of different errors try: self.values = Index(values, **kwargs) except Exception: # noqa: E722 # if the output freq is different that what we recorded, # it should be None (see also 'doc example part 2') if 'freq' in kwargs: kwargs['freq'] = None self.values = Index(values, **kwargs) self.values = _set_tz(self.values, self.tz) return self
set the values from this selection: take = take ownership
Below is the the instruction that describes the task: ### Input: set the values from this selection: take = take ownership ### Response: def convert(self, values, nan_rep, encoding, errors): """ set the values from this selection: take = take ownership """ # values is a recarray if values.dtype.fields is not None: values = values[self.cname] values = _maybe_convert(values, self.kind, encoding, errors) kwargs = dict() if self.freq is not None: kwargs['freq'] = _ensure_decoded(self.freq) if self.index_name is not None: kwargs['name'] = _ensure_decoded(self.index_name) # making an Index instance could throw a number of different errors try: self.values = Index(values, **kwargs) except Exception: # noqa: E722 # if the output freq is different that what we recorded, # it should be None (see also 'doc example part 2') if 'freq' in kwargs: kwargs['freq'] = None self.values = Index(values, **kwargs) self.values = _set_tz(self.values, self.tz) return self
def _set_sip_ipv4_address(self, v, load=False): """ Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/sip_ipv4_address (sip-ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_sip_ipv4_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sip_ipv4_address() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """sip_ipv4_address must be of a type compatible with sip-ipv4-address""", 'defined-type': "brocade-bgp:sip-ipv4-address", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True)""", }) self.__sip_ipv4_address = t if hasattr(self, '_set'): self._set()
Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/sip_ipv4_address (sip-ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_sip_ipv4_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sip_ipv4_address() directly.
Below is the the instruction that describes the task: ### Input: Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/sip_ipv4_address (sip-ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_sip_ipv4_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sip_ipv4_address() directly. ### Response: def _set_sip_ipv4_address(self, v, load=False): """ Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/sip_ipv4_address (sip-ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_sip_ipv4_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sip_ipv4_address() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """sip_ipv4_address must be of a type compatible with sip-ipv4-address""", 'defined-type': "brocade-bgp:sip-ipv4-address", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True)""", }) self.__sip_ipv4_address = t if hasattr(self, '_set'): self._set()
def fill_translation_cache(instance): """ Fill the translation cache using information received in the instance objects as extra fields. You can not do this in post_init because the extra fields are assigned by QuerySet.iterator after model initialization. """ if hasattr(instance, '_translation_cache'): # do not refill the cache return instance._translation_cache = {} # unsafed instances cannot have translations if not instance.pk: return for language_code in get_language_code_list(): # see if translation for language_code was in the query field_alias = get_translated_field_alias('code', language_code) if getattr(instance, field_alias, None) is not None: field_names = [f.attname for f in instance._meta.translation_model._meta.fields] # if so, create a translation object and put it in the cache field_data = {} for fname in field_names: field_data[fname] = getattr(instance, get_translated_field_alias(fname, language_code)) translation = instance._meta.translation_model(**field_data) instance._translation_cache[language_code] = translation # In some situations an (existing in the DB) object is loaded # without using the normal QuerySet. In such case fallback to # loading the translations using a separate query. # Unfortunately, this is indistinguishable from the situation when # an object does not have any translations. Oh well, we'll have # to live with this for the time being. if len(instance._translation_cache.keys()) == 0: for translation in instance.translations.all(): instance._translation_cache[translation.language_code] = translation
Fill the translation cache using information received in the instance objects as extra fields. You can not do this in post_init because the extra fields are assigned by QuerySet.iterator after model initialization.
Below is the the instruction that describes the task: ### Input: Fill the translation cache using information received in the instance objects as extra fields. You can not do this in post_init because the extra fields are assigned by QuerySet.iterator after model initialization. ### Response: def fill_translation_cache(instance): """ Fill the translation cache using information received in the instance objects as extra fields. You can not do this in post_init because the extra fields are assigned by QuerySet.iterator after model initialization. """ if hasattr(instance, '_translation_cache'): # do not refill the cache return instance._translation_cache = {} # unsafed instances cannot have translations if not instance.pk: return for language_code in get_language_code_list(): # see if translation for language_code was in the query field_alias = get_translated_field_alias('code', language_code) if getattr(instance, field_alias, None) is not None: field_names = [f.attname for f in instance._meta.translation_model._meta.fields] # if so, create a translation object and put it in the cache field_data = {} for fname in field_names: field_data[fname] = getattr(instance, get_translated_field_alias(fname, language_code)) translation = instance._meta.translation_model(**field_data) instance._translation_cache[language_code] = translation # In some situations an (existing in the DB) object is loaded # without using the normal QuerySet. In such case fallback to # loading the translations using a separate query. # Unfortunately, this is indistinguishable from the situation when # an object does not have any translations. Oh well, we'll have # to live with this for the time being. if len(instance._translation_cache.keys()) == 0: for translation in instance.translations.all(): instance._translation_cache[translation.language_code] = translation
def lasts(iterable, items=1, default=None): # type: (Iterable[T], int, T) -> Iterable[T] """ Lazily return the last x items from this iterable or default. """ last_items = deque(iterable, maxlen=items) for _ in range(items - len(last_items)): yield default for y in last_items: yield y
Lazily return the last x items from this iterable or default.
Below is the the instruction that describes the task: ### Input: Lazily return the last x items from this iterable or default. ### Response: def lasts(iterable, items=1, default=None): # type: (Iterable[T], int, T) -> Iterable[T] """ Lazily return the last x items from this iterable or default. """ last_items = deque(iterable, maxlen=items) for _ in range(items - len(last_items)): yield default for y in last_items: yield y
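A quick usage sketch, assuming the lasts() generator above is in scope; when the iterable is shorter than items, the defaults are yielded first so the output length is always items.

print(list(lasts([1, 2, 3, 4], items=2)))              # [3, 4]
print(list(lasts([1], items=3)))                       # [None, None, 1]
print(list(lasts(iter("abc"), items=2, default="-")))  # ['b', 'c']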
def all_subclasses(cls): """Generator yielding all subclasses of `cls` recursively""" for subcls in cls.__subclasses__(): yield subcls for subsubcls in all_subclasses(subcls): yield subsubcls
Generator yielding all subclasses of `cls` recursively
Below is the the instruction that describes the task: ### Input: Generator yielding all subclasses of `cls` recursively ### Response: def all_subclasses(cls): """Generator yielding all subclasses of `cls` recursively""" for subcls in cls.__subclasses__(): yield subcls for subsubcls in all_subclasses(subcls): yield subsubcls
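Usage sketch, assuming all_subclasses() above is in scope: the generator walks the subclass tree depth-first, so nested subclasses appear right after their parent.

class Base: pass
class Mid(Base): pass
class Leaf(Mid): pass
class Other(Base): pass

print([c.__name__ for c in all_subclasses(Base)])  # ['Mid', 'Leaf', 'Other']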
def login_defs(): """Discover the minimum and maximum UID number.""" uid_min = None uid_max = None login_defs_path = '/etc/login.defs' if os.path.exists(login_defs_path): with io.open(text_type(login_defs_path), encoding=text_type('utf-8')) as log_defs_file: login_data = log_defs_file.readlines() for line in login_data: if PY3: # pragma: no cover line = str(line) if PY2: # pragma: no cover line = line.encode(text_type('utf8')) if line[:7] == text_type('UID_MIN'): uid_min = int(line.split()[1].strip()) if line[:7] == text_type('UID_MAX'): uid_max = int(line.split()[1].strip()) if not uid_min: # pragma: no cover uid_min = DEFAULT_UID_MIN if not uid_max: # pragma: no cover uid_max = DEFAULT_UID_MAX return uid_min, uid_max
Discover the minimum and maximum UID number.
Below is the the instruction that describes the task: ### Input: Discover the minimum and maximum UID number. ### Response: def login_defs(): """Discover the minimum and maximum UID number.""" uid_min = None uid_max = None login_defs_path = '/etc/login.defs' if os.path.exists(login_defs_path): with io.open(text_type(login_defs_path), encoding=text_type('utf-8')) as log_defs_file: login_data = log_defs_file.readlines() for line in login_data: if PY3: # pragma: no cover line = str(line) if PY2: # pragma: no cover line = line.encode(text_type('utf8')) if line[:7] == text_type('UID_MIN'): uid_min = int(line.split()[1].strip()) if line[:7] == text_type('UID_MAX'): uid_max = int(line.split()[1].strip()) if not uid_min: # pragma: no cover uid_min = DEFAULT_UID_MIN if not uid_max: # pragma: no cover uid_max = DEFAULT_UID_MAX return uid_min, uid_max
def _dump_to_json(self, with_stats): """ Dump the models into a list of strings. Each string is a text representation of a tree. Parameters ---------- with_stats : bool If true, include node statistics in the output. Returns ------- out : SFrame A table with two columns: feature, count, ordered by 'count' in descending order. """ import json trees_json_str = tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='json') trees_json = [json.loads(x) for x in trees_json_str] # To avoid lose precision when using libjson, _dump_model with json format encode # numerical values in hexadecimal (little endian). # Now we need to convert them back to floats, using unpack. '<f' means single precision float # in little endian import struct import sys def hexadecimal_to_float(s): if sys.version_info[0] >= 3: return struct.unpack('<f', bytes.fromhex(s))[0] # unpack always return a tuple else: return struct.unpack('<f', s.decode('hex'))[0] # unpack always return a tuple for d in trees_json: nodes = d['vertices'] for n in nodes: if 'value_hexadecimal' in n: n['value'] = hexadecimal_to_float(n['value_hexadecimal']) return trees_json
Dump the models into a list of strings. Each string is a text representation of a tree. Parameters ---------- with_stats : bool If true, include node statistics in the output. Returns ------- out : SFrame A table with two columns: feature, count, ordered by 'count' in descending order.
Below is the the instruction that describes the task: ### Input: Dump the models into a list of strings. Each string is a text representation of a tree. Parameters ---------- with_stats : bool If true, include node statistics in the output. Returns ------- out : SFrame A table with two columns: feature, count, ordered by 'count' in descending order. ### Response: def _dump_to_json(self, with_stats): """ Dump the models into a list of strings. Each string is a text representation of a tree. Parameters ---------- with_stats : bool If true, include node statistics in the output. Returns ------- out : SFrame A table with two columns: feature, count, ordered by 'count' in descending order. """ import json trees_json_str = tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='json') trees_json = [json.loads(x) for x in trees_json_str] # To avoid lose precision when using libjson, _dump_model with json format encode # numerical values in hexadecimal (little endian). # Now we need to convert them back to floats, using unpack. '<f' means single precision float # in little endian import struct import sys def hexadecimal_to_float(s): if sys.version_info[0] >= 3: return struct.unpack('<f', bytes.fromhex(s))[0] # unpack always return a tuple else: return struct.unpack('<f', s.decode('hex'))[0] # unpack always return a tuple for d in trees_json: nodes = d['vertices'] for n in nodes: if 'value_hexadecimal' in n: n['value'] = hexadecimal_to_float(n['value_hexadecimal']) return trees_json
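The little-endian hexadecimal round-trip used by hexadecimal_to_float can be checked on its own, independent of any model dump; for example, 1.0 encoded as a little-endian single-precision float is the hex string '0000803f'.

import struct

def hexadecimal_to_float(s):
    # '<f' = little-endian single-precision float
    return struct.unpack('<f', bytes.fromhex(s))[0]

print(hexadecimal_to_float('0000803f'))  # 1.0
print(hexadecimal_to_float('0000003f'))  # 0.5
print(struct.pack('<f', 2.5).hex())      # '00002040'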
def set_memory_cache(self, results, key=None): """Store result in memory cache with key matching model state.""" key = self.model.hash if key is None else key self.memory_cache[key] = results
Store result in memory cache with key matching model state.
Below is the the instruction that describes the task: ### Input: Store result in memory cache with key matching model state. ### Response: def set_memory_cache(self, results, key=None): """Store result in memory cache with key matching model state.""" key = self.model.hash if key is None else key self.memory_cache[key] = results
def _update_trial_queue(self, blocking=False, timeout=600): """Adds next trials to queue if possible. Note that the timeout is currently unexposed to the user. Args: blocking (bool): Blocks until either a trial is available or is_finished (timeout or search algorithm finishes). timeout (int): Seconds before blocking times out. """ trials = self._search_alg.next_trials() if blocking and not trials: start = time.time() # Checking `is_finished` instead of _search_alg.is_finished # is fine because blocking only occurs if all trials are # finished and search_algorithm is not yet finished while (not trials and not self.is_finished() and time.time() - start < timeout): logger.info("Blocking for next trial...") trials = self._search_alg.next_trials() time.sleep(1) for trial in trials: self.add_trial(trial)
Adds next trials to queue if possible. Note that the timeout is currently unexposed to the user. Args: blocking (bool): Blocks until either a trial is available or is_finished (timeout or search algorithm finishes). timeout (int): Seconds before blocking times out.
Below is the the instruction that describes the task: ### Input: Adds next trials to queue if possible. Note that the timeout is currently unexposed to the user. Args: blocking (bool): Blocks until either a trial is available or is_finished (timeout or search algorithm finishes). timeout (int): Seconds before blocking times out. ### Response: def _update_trial_queue(self, blocking=False, timeout=600): """Adds next trials to queue if possible. Note that the timeout is currently unexposed to the user. Args: blocking (bool): Blocks until either a trial is available or is_finished (timeout or search algorithm finishes). timeout (int): Seconds before blocking times out. """ trials = self._search_alg.next_trials() if blocking and not trials: start = time.time() # Checking `is_finished` instead of _search_alg.is_finished # is fine because blocking only occurs if all trials are # finished and search_algorithm is not yet finished while (not trials and not self.is_finished() and time.time() - start < timeout): logger.info("Blocking for next trial...") trials = self._search_alg.next_trials() time.sleep(1) for trial in trials: self.add_trial(trial)
def racks(self): """ Gets the Racks API client. Returns: Racks: """ if not self.__racks: self.__racks = Racks(self.__connection) return self.__racks
Gets the Racks API client. Returns: Racks:
Below is the the instruction that describes the task: ### Input: Gets the Racks API client. Returns: Racks: ### Response: def racks(self): """ Gets the Racks API client. Returns: Racks: """ if not self.__racks: self.__racks = Racks(self.__connection) return self.__racks
def get_yaml_files_at_env_root(self): """Return list of yaml files in env_root.""" yaml_files = glob.glob( os.path.join(self.env_root, '*.yaml') ) yml_files = glob.glob( os.path.join(self.env_root, '*.yml') ) return yaml_files + yml_files
Return list of yaml files in env_root.
Below is the the instruction that describes the task: ### Input: Return list of yaml files in env_root. ### Response: def get_yaml_files_at_env_root(self): """Return list of yaml files in env_root.""" yaml_files = glob.glob( os.path.join(self.env_root, '*.yaml') ) yml_files = glob.glob( os.path.join(self.env_root, '*.yml') ) return yaml_files + yml_files
def hide_button_span(self, mode, file=sys.stdout): """ :param int mode: 1 or 2 :param io.TextIOBase|io.StringIO file: """ file.write("\033[83;%iu" % mode) yield file.write("\033[83;0u")
:param int mode: 1 or 2 :param io.TextIOBase|io.StringIO file:
Below is the the instruction that describes the task: ### Input: :param int mode: 1 or 2 :param io.TextIOBase|io.StringIO file: ### Response: def hide_button_span(self, mode, file=sys.stdout): """ :param int mode: 1 or 2 :param io.TextIOBase|io.StringIO file: """ file.write("\033[83;%iu" % mode) yield file.write("\033[83;0u")
def _ns_var( py_ns_var: str = _NS_VAR, lisp_ns_var: str = LISP_NS_VAR, lisp_ns_ns: str = CORE_NS ) -> ast.Assign: """Assign a Python variable named `ns_var` to the value of the current namespace.""" return ast.Assign( targets=[ast.Name(id=py_ns_var, ctx=ast.Store())], value=ast.Call( func=_FIND_VAR_FN_NAME, args=[ ast.Call( func=_NEW_SYM_FN_NAME, args=[ast.Str(lisp_ns_var)], keywords=[ast.keyword(arg="ns", value=ast.Str(lisp_ns_ns))], ) ], keywords=[], ), )
Assign a Python variable named `ns_var` to the value of the current namespace.
Below is the the instruction that describes the task: ### Input: Assign a Python variable named `ns_var` to the value of the current namespace. ### Response: def _ns_var( py_ns_var: str = _NS_VAR, lisp_ns_var: str = LISP_NS_VAR, lisp_ns_ns: str = CORE_NS ) -> ast.Assign: """Assign a Python variable named `ns_var` to the value of the current namespace.""" return ast.Assign( targets=[ast.Name(id=py_ns_var, ctx=ast.Store())], value=ast.Call( func=_FIND_VAR_FN_NAME, args=[ ast.Call( func=_NEW_SYM_FN_NAME, args=[ast.Str(lisp_ns_var)], keywords=[ast.keyword(arg="ns", value=ast.Str(lisp_ns_ns))], ) ], keywords=[], ), )
def reciprocal_rank( model, test_interactions, train_interactions=None, user_features=None, item_features=None, preserve_rows=False, num_threads=1, check_intersections=True, ): """ Measure the reciprocal rank metric for a model: 1 / the rank of the highest ranked positive example. A perfect score is 1.0. Parameters ---------- model: LightFM instance the fitted model to be evaluated test_interactions: np.float32 csr_matrix of shape [n_users, n_items] Non-zero entries representing known positives in the evaluation set. train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional Non-zero entries representing known positives in the train set. These will be omitted from the score calculations to avoid re-recommending known positives. user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional Each row contains that user's weights over features. item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional Each row contains that item's weights over features. preserve_rows: boolean, optional When False (default), the number of rows in the output will be equal to the number of users with interactions in the evaluation set. When True, the number of rows in the output will be equal to the number of users. num_threads: int, optional Number of parallel computation threads to use. Should not be higher than the number of physical cores. check_intersections: bool, optional, True by default, Only relevant when train_interactions are supplied. A flag that signals whether the test and train matrices should be checked for intersections to prevent optimistic ranks / wrong evaluation / bad data split. Returns ------- np.array of shape [n_users with interactions or n_users,] Numpy array containing reciprocal rank scores for each user. If there are no interactions for a given user the returned value will be 0.0. """ if num_threads < 1: raise ValueError("Number of threads must be 1 or larger.") ranks = model.predict_rank( test_interactions, train_interactions=train_interactions, user_features=user_features, item_features=item_features, num_threads=num_threads, check_intersections=check_intersections, ) ranks.data = 1.0 / (ranks.data + 1.0) ranks = np.squeeze(np.array(ranks.max(axis=1).todense())) if not preserve_rows: ranks = ranks[test_interactions.getnnz(axis=1) > 0] return ranks
Measure the reciprocal rank metric for a model: 1 / the rank of the highest ranked positive example. A perfect score is 1.0. Parameters ---------- model: LightFM instance the fitted model to be evaluated test_interactions: np.float32 csr_matrix of shape [n_users, n_items] Non-zero entries representing known positives in the evaluation set. train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional Non-zero entries representing known positives in the train set. These will be omitted from the score calculations to avoid re-recommending known positives. user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional Each row contains that user's weights over features. item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional Each row contains that item's weights over features. preserve_rows: boolean, optional When False (default), the number of rows in the output will be equal to the number of users with interactions in the evaluation set. When True, the number of rows in the output will be equal to the number of users. num_threads: int, optional Number of parallel computation threads to use. Should not be higher than the number of physical cores. check_intersections: bool, optional, True by default, Only relevant when train_interactions are supplied. A flag that signals whether the test and train matrices should be checked for intersections to prevent optimistic ranks / wrong evaluation / bad data split. Returns ------- np.array of shape [n_users with interactions or n_users,] Numpy array containing reciprocal rank scores for each user. If there are no interactions for a given user the returned value will be 0.0.
Below is the the instruction that describes the task: ### Input: Measure the reciprocal rank metric for a model: 1 / the rank of the highest ranked positive example. A perfect score is 1.0. Parameters ---------- model: LightFM instance the fitted model to be evaluated test_interactions: np.float32 csr_matrix of shape [n_users, n_items] Non-zero entries representing known positives in the evaluation set. train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional Non-zero entries representing known positives in the train set. These will be omitted from the score calculations to avoid re-recommending known positives. user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional Each row contains that user's weights over features. item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional Each row contains that item's weights over features. preserve_rows: boolean, optional When False (default), the number of rows in the output will be equal to the number of users with interactions in the evaluation set. When True, the number of rows in the output will be equal to the number of users. num_threads: int, optional Number of parallel computation threads to use. Should not be higher than the number of physical cores. check_intersections: bool, optional, True by default, Only relevant when train_interactions are supplied. A flag that signals whether the test and train matrices should be checked for intersections to prevent optimistic ranks / wrong evaluation / bad data split. Returns ------- np.array of shape [n_users with interactions or n_users,] Numpy array containing reciprocal rank scores for each user. If there are no interactions for a given user the returned value will be 0.0. ### Response: def reciprocal_rank( model, test_interactions, train_interactions=None, user_features=None, item_features=None, preserve_rows=False, num_threads=1, check_intersections=True, ): """ Measure the reciprocal rank metric for a model: 1 / the rank of the highest ranked positive example. A perfect score is 1.0. Parameters ---------- model: LightFM instance the fitted model to be evaluated test_interactions: np.float32 csr_matrix of shape [n_users, n_items] Non-zero entries representing known positives in the evaluation set. train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional Non-zero entries representing known positives in the train set. These will be omitted from the score calculations to avoid re-recommending known positives. user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional Each row contains that user's weights over features. item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional Each row contains that item's weights over features. preserve_rows: boolean, optional When False (default), the number of rows in the output will be equal to the number of users with interactions in the evaluation set. When True, the number of rows in the output will be equal to the number of users. num_threads: int, optional Number of parallel computation threads to use. Should not be higher than the number of physical cores. check_intersections: bool, optional, True by default, Only relevant when train_interactions are supplied. A flag that signals whether the test and train matrices should be checked for intersections to prevent optimistic ranks / wrong evaluation / bad data split. 
Returns ------- np.array of shape [n_users with interactions or n_users,] Numpy array containing reciprocal rank scores for each user. If there are no interactions for a given user the returned value will be 0.0. """ if num_threads < 1: raise ValueError("Number of threads must be 1 or larger.") ranks = model.predict_rank( test_interactions, train_interactions=train_interactions, user_features=user_features, item_features=item_features, num_threads=num_threads, check_intersections=check_intersections, ) ranks.data = 1.0 / (ranks.data + 1.0) ranks = np.squeeze(np.array(ranks.max(axis=1).todense())) if not preserve_rows: ranks = ranks[test_interactions.getnnz(axis=1) > 0] return ranks
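A minimal end-to-end sketch with LightFM, assuming lightfm and scipy are installed; the model is evaluated on its own training interactions purely to show the call shape, so no train_interactions filtering is passed.

import numpy as np
from scipy.sparse import csr_matrix
from lightfm import LightFM
from lightfm.evaluation import reciprocal_rank

rng = np.random.RandomState(0)
interactions = csr_matrix(rng.binomial(1, 0.3, size=(30, 20)).astype(np.float32))

model = LightFM(loss="warp", random_state=0)
model.fit(interactions, epochs=5, num_threads=1)

scores = reciprocal_rank(model, interactions, num_threads=1)
print(scores.shape, scores.mean())  # one score per user that has at least one positive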
def sample(self, size=(), rule="R", antithetic=None): """ Create pseudo-random generated samples. By default, the samples are created using standard (pseudo-)random samples. However, if needed, the samples can also be created by either low-discrepancy sequences, and/or variance reduction techniques. Changing the sampling scheme, use the following ``rule`` flag: +-------+-------------------------------------------------+ | key | Description | +=======+=================================================+ | ``C`` | Roots of the first order Chebyshev polynomials. | +-------+-------------------------------------------------+ | ``NC``| Chebyshev nodes adjusted to ensure nested. | +-------+-------------------------------------------------+ | ``K`` | Korobov lattice. | +-------+-------------------------------------------------+ | ``R`` | Classical (Pseudo-)Random samples. | +-------+-------------------------------------------------+ | ``RG``| Regular spaced grid. | +-------+-------------------------------------------------+ | ``NG``| Nested regular spaced grid. | +-------+-------------------------------------------------+ | ``L`` | Latin hypercube samples. | +-------+-------------------------------------------------+ | ``S`` | Sobol low-discrepancy sequence. | +-------+-------------------------------------------------+ | ``H`` | Halton low-discrepancy sequence. | +-------+-------------------------------------------------+ | ``M`` | Hammersley low-discrepancy sequence. | +-------+-------------------------------------------------+ All samples are created on the ``[0, 1]``-hypercube, which then is mapped into the domain of the distribution using the inverse Rosenblatt transformation. Args: size (numpy.ndarray): The size of the samples to generate. rule (str): Indicator defining the sampling scheme. antithetic (bool, numpy.ndarray): If provided, will be used to setup antithetic variables. If array, defines the axes to mirror. Returns: (numpy.ndarray): Random samples with shape ``(len(self),)+self.shape``. """ size_ = numpy.prod(size, dtype=int) dim = len(self) if dim > 1: if isinstance(size, (tuple, list, numpy.ndarray)): shape = (dim,) + tuple(size) else: shape = (dim, size) else: shape = size from . import sampler out = sampler.generator.generate_samples( order=size_, domain=self, rule=rule, antithetic=antithetic) try: out = out.reshape(shape) except: if len(self) == 1: out = out.flatten() else: out = out.reshape(dim, int(out.size/dim)) return out
Create pseudo-random generated samples. By default, the samples are created using standard (pseudo-)random samples. However, if needed, the samples can also be created by either low-discrepancy sequences, and/or variance reduction techniques. Changing the sampling scheme, use the following ``rule`` flag: +-------+-------------------------------------------------+ | key | Description | +=======+=================================================+ | ``C`` | Roots of the first order Chebyshev polynomials. | +-------+-------------------------------------------------+ | ``NC``| Chebyshev nodes adjusted to ensure nested. | +-------+-------------------------------------------------+ | ``K`` | Korobov lattice. | +-------+-------------------------------------------------+ | ``R`` | Classical (Pseudo-)Random samples. | +-------+-------------------------------------------------+ | ``RG``| Regular spaced grid. | +-------+-------------------------------------------------+ | ``NG``| Nested regular spaced grid. | +-------+-------------------------------------------------+ | ``L`` | Latin hypercube samples. | +-------+-------------------------------------------------+ | ``S`` | Sobol low-discrepancy sequence. | +-------+-------------------------------------------------+ | ``H`` | Halton low-discrepancy sequence. | +-------+-------------------------------------------------+ | ``M`` | Hammersley low-discrepancy sequence. | +-------+-------------------------------------------------+ All samples are created on the ``[0, 1]``-hypercube, which then is mapped into the domain of the distribution using the inverse Rosenblatt transformation. Args: size (numpy.ndarray): The size of the samples to generate. rule (str): Indicator defining the sampling scheme. antithetic (bool, numpy.ndarray): If provided, will be used to setup antithetic variables. If array, defines the axes to mirror. Returns: (numpy.ndarray): Random samples with shape ``(len(self),)+self.shape``.
Below is the instruction that describes the task:
### Input:
Create pseudo-random generated samples.

        By default, the samples are created using standard (pseudo-)random
        samples. However, if needed, the samples can also be created using
        low-discrepancy sequences and/or variance reduction techniques. To
        change the sampling scheme, use the following ``rule`` flag:

        +-------+-------------------------------------------------+
        | key   | Description                                     |
        +=======+=================================================+
        | ``C`` | Roots of the first order Chebyshev polynomials. |
        +-------+-------------------------------------------------+
        | ``NC``| Chebyshev nodes adjusted to ensure nested.      |
        +-------+-------------------------------------------------+
        | ``K`` | Korobov lattice.                                |
        +-------+-------------------------------------------------+
        | ``R`` | Classical (Pseudo-)Random samples.              |
        +-------+-------------------------------------------------+
        | ``RG``| Regular spaced grid.                            |
        +-------+-------------------------------------------------+
        | ``NG``| Nested regular spaced grid.                     |
        +-------+-------------------------------------------------+
        | ``L`` | Latin hypercube samples.                        |
        +-------+-------------------------------------------------+
        | ``S`` | Sobol low-discrepancy sequence.                 |
        +-------+-------------------------------------------------+
        | ``H`` | Halton low-discrepancy sequence.                |
        +-------+-------------------------------------------------+
        | ``M`` | Hammersley low-discrepancy sequence.            |
        +-------+-------------------------------------------------+

        All samples are created on the ``[0, 1]``-hypercube, which then is
        mapped into the domain of the distribution using the inverse
        Rosenblatt transformation.

        Args:
            size (numpy.ndarray):
                The size of the samples to generate.
            rule (str):
                Indicator defining the sampling scheme.
            antithetic (bool, numpy.ndarray):
                If provided, will be used to set up antithetic variables.
                If array, defines the axes to mirror.

        Returns:
            (numpy.ndarray):
                Random samples with shape ``(len(self),)+self.shape``.
### Response:
def sample(self, size=(), rule="R", antithetic=None):
        """
        Create pseudo-random generated samples.

        By default, the samples are created using standard (pseudo-)random
        samples. However, if needed, the samples can also be created using
        low-discrepancy sequences and/or variance reduction techniques. To
        change the sampling scheme, use the following ``rule`` flag:

        +-------+-------------------------------------------------+
        | key   | Description                                     |
        +=======+=================================================+
        | ``C`` | Roots of the first order Chebyshev polynomials. |
        +-------+-------------------------------------------------+
        | ``NC``| Chebyshev nodes adjusted to ensure nested.      |
        +-------+-------------------------------------------------+
        | ``K`` | Korobov lattice.                                |
        +-------+-------------------------------------------------+
        | ``R`` | Classical (Pseudo-)Random samples.              |
        +-------+-------------------------------------------------+
        | ``RG``| Regular spaced grid.                            |
        +-------+-------------------------------------------------+
        | ``NG``| Nested regular spaced grid.                     |
        +-------+-------------------------------------------------+
        | ``L`` | Latin hypercube samples.                        |
        +-------+-------------------------------------------------+
        | ``S`` | Sobol low-discrepancy sequence.                 |
        +-------+-------------------------------------------------+
        | ``H`` | Halton low-discrepancy sequence.                |
        +-------+-------------------------------------------------+
        | ``M`` | Hammersley low-discrepancy sequence.            |
        +-------+-------------------------------------------------+

        All samples are created on the ``[0, 1]``-hypercube, which then is
        mapped into the domain of the distribution using the inverse
        Rosenblatt transformation.

        Args:
            size (numpy.ndarray):
                The size of the samples to generate.
            rule (str):
                Indicator defining the sampling scheme.
            antithetic (bool, numpy.ndarray):
                If provided, will be used to set up antithetic variables.
                If array, defines the axes to mirror.

        Returns:
            (numpy.ndarray):
                Random samples with shape ``(len(self),)+self.shape``.
        """
        size_ = numpy.prod(size, dtype=int)
        dim = len(self)
        if dim > 1:
            if isinstance(size, (tuple, list, numpy.ndarray)):
                shape = (dim,) + tuple(size)
            else:
                shape = (dim, size)
        else:
            shape = size
        from . import sampler
        out = sampler.generator.generate_samples(
            order=size_, domain=self, rule=rule, antithetic=antithetic)
        try:
            out = out.reshape(shape)
        except:
            if len(self) == 1:
                out = out.flatten()
            else:
                out = out.reshape(dim, int(out.size/dim))
        return out
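A short usage sketch may help illustrate the record above. It assumes the method is the ``sample`` method of a chaospy distribution object; the ``chaospy`` import, the joint distribution construction, and the concrete sample sizes are illustrative assumptions, not part of the record.

import chaospy

# Hypothetical example: a joint distribution with two independent components.
distribution = chaospy.J(chaospy.Uniform(0, 1), chaospy.Normal(0, 1))

# 1000 Halton low-discrepancy samples; per the code above the result has shape (2, 1000).
halton_samples = distribution.sample(size=1000, rule="H")

# Classical pseudo-random samples with antithetic variates mirrored in all axes.
antithetic_samples = distribution.sample(size=1000, rule="R", antithetic=True)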
def add_data(self, request, pk=None):
        """Add data to Entity and its collections."""
        # add data to entity
        resp = super().add_data(request, pk)
        # add data to all collections the entity belongs to
        entity = self.get_object()
        for collection in entity.collections.all():
            collection.data.add(*request.data['ids'])
        return resp
Add data to Entity and its collections.
Below is the instruction that describes the task:
### Input:
Add data to Entity and its collections.
### Response:
def add_data(self, request, pk=None):
        """Add data to Entity and its collections."""
        # add data to entity
        resp = super().add_data(request, pk)
        # add data to all collections the entity belongs to
        entity = self.get_object()
        for collection in entity.collections.all():
            collection.data.add(*request.data['ids'])
        return resp
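A hedged client-side sketch of how this view action might be exercised follows; the URL, the entity primary key, and the authentication header are assumptions, since the record only shows the view method and the expected ``request.data['ids']`` payload.

import requests

# Hypothetical endpoint; the route prefix and entity pk are placeholders.
response = requests.post(
    "https://example.com/api/entity/42/add_data/",
    json={"ids": [101, 102]},  # ids of data objects to attach
    headers={"Authorization": "Bearer <token>"},
)
print(response.status_code)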
def all(self, data={}, **kwargs):
        """
        Fetch all Subscription entities

        Returns:
            Dictionary of Subscription data
        """
        return super(Subscription, self).all(data, **kwargs)
Fetch all Subscription entities

        Returns:
            Dictionary of Subscription data
Below is the instruction that describes the task:
### Input:
Fetch all Subscription entities

        Returns:
            Dictionary of Subscription data
### Response:
def all(self, data={}, **kwargs):
        """
        Fetch all Subscription entities

        Returns:
            Dictionary of Subscription data
        """
        return super(Subscription, self).all(data, **kwargs)
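The following sketch shows how such a method is typically reached through the razorpay Python client; the key id and secret are placeholders, and the query parameters are only an assumed example.

import razorpay

client = razorpay.Client(auth=("<KEY_ID>", "<KEY_SECRET>"))

# Fetch subscriptions; optional query params such as count are passed as a dict.
subscriptions = client.subscription.all({"count": 10})
print(subscriptions.get("count"))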
def set_notebook(note_store, my_notebook, notebook_id):
    """
    create a notebook
    """
    if notebook_id == 0:
        new_notebook = Types.Notebook()
        new_notebook.name = my_notebook
        new_notebook.defaultNotebook = False
        notebook_id = note_store.createNotebook(new_notebook).guid
    return notebook_id
create a notebook
Below is the instruction that describes the task:
### Input:
create a notebook
### Response:
def set_notebook(note_store, my_notebook, notebook_id):
    """
    create a notebook
    """
    if notebook_id == 0:
        new_notebook = Types.Notebook()
        new_notebook.name = my_notebook
        new_notebook.defaultNotebook = False
        notebook_id = note_store.createNotebook(new_notebook).guid
    return notebook_id
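A brief usage sketch, assuming the Evernote SDK that this helper appears to be written against; the developer token is a placeholder and the notebook name is arbitrary.

from evernote.api.client import EvernoteClient

client = EvernoteClient(token="<developer-token>", sandbox=True)
note_store = client.get_note_store()

# Passing notebook_id == 0 makes set_notebook create the notebook and
# return its guid; any other value is returned unchanged.
notebook_id = set_notebook(note_store, "TriggerHappy notes", 0)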
def get_assessment_form_for_create(self, assessment_record_types):
        """Gets the assessment form for creating new assessments.

        A new form should be requested for each create transaction.

        arg:    assessment_record_types (osid.type.Type[]): array of
                assessment record types to be included in the create
                operation or an empty list if none
        return: (osid.assessment.AssessmentForm) - the assessment form
        raise:  NullArgument - ``assessment_record_types`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure occurred
        raise:  Unsupported - unable to get form for requested record
                types
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceAdminSession.get_resource_form_for_create_template
        for arg in assessment_record_types:
            if not isinstance(arg, ABCType):
                raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
        if assessment_record_types == []:
            obj_form = objects.AssessmentForm(
                bank_id=self._catalog_id,
                runtime=self._runtime,
                effective_agent_id=self.get_effective_agent_id(),
                proxy=self._proxy)
        else:
            obj_form = objects.AssessmentForm(
                bank_id=self._catalog_id,
                record_types=assessment_record_types,
                runtime=self._runtime,
                effective_agent_id=self.get_effective_agent_id(),
                proxy=self._proxy)
        self._forms[obj_form.get_id().get_identifier()] = not CREATED
        return obj_form
Gets the assessment form for creating new assessments.

        A new form should be requested for each create transaction.

        arg:    assessment_record_types (osid.type.Type[]): array of
                assessment record types to be included in the create
                operation or an empty list if none
        return: (osid.assessment.AssessmentForm) - the assessment form
        raise:  NullArgument - ``assessment_record_types`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure occurred
        raise:  Unsupported - unable to get form for requested record
                types
        *compliance: mandatory -- This method must be implemented.*
Below is the instruction that describes the task:
### Input:
Gets the assessment form for creating new assessments.

        A new form should be requested for each create transaction.

        arg:    assessment_record_types (osid.type.Type[]): array of
                assessment record types to be included in the create
                operation or an empty list if none
        return: (osid.assessment.AssessmentForm) - the assessment form
        raise:  NullArgument - ``assessment_record_types`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure occurred
        raise:  Unsupported - unable to get form for requested record
                types
        *compliance: mandatory -- This method must be implemented.*
### Response:
def get_assessment_form_for_create(self, assessment_record_types):
        """Gets the assessment form for creating new assessments.

        A new form should be requested for each create transaction.

        arg:    assessment_record_types (osid.type.Type[]): array of
                assessment record types to be included in the create
                operation or an empty list if none
        return: (osid.assessment.AssessmentForm) - the assessment form
        raise:  NullArgument - ``assessment_record_types`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure occurred
        raise:  Unsupported - unable to get form for requested record
                types
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceAdminSession.get_resource_form_for_create_template
        for arg in assessment_record_types:
            if not isinstance(arg, ABCType):
                raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
        if assessment_record_types == []:
            obj_form = objects.AssessmentForm(
                bank_id=self._catalog_id,
                runtime=self._runtime,
                effective_agent_id=self.get_effective_agent_id(),
                proxy=self._proxy)
        else:
            obj_form = objects.AssessmentForm(
                bank_id=self._catalog_id,
                record_types=assessment_record_types,
                runtime=self._runtime,
                effective_agent_id=self.get_effective_agent_id(),
                proxy=self._proxy)
        self._forms[obj_form.get_id().get_identifier()] = not CREATED
        return obj_form
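To round off the record, here is a hedged sketch of how such a form is used within an OSID-style admin session; how the session object itself is obtained depends on the surrounding runtime, so that setup is only implied, and the follow-up ``create_assessment`` call is taken from the same session interface rather than from this record.

record_types = []  # request no additional record types
form = assessment_admin_session.get_assessment_form_for_create(record_types)
# ...populate the form's metadata (display name, description, etc.)...
assessment = assessment_admin_session.create_assessment(form)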