Dataset columns: code (string, 75–104k characters), docstring (string, 1–46.9k characters), text (string, 164–112k characters). Sample entries follow.
def evalop(op, left, right):
    "this takes evaluated left and right (i.e. values not expressions)"
    if op in ('=', '!=', '>', '<'):
        return threevl.ThreeVL.compare(op, left, right)
    elif op in ('+', '-', '*', '/'):
        # todo: does arithmetic require threevl?
        if op == '/':
            raise NotImplementedError('todo: spec about int/float division')
        return (left + right) if op == '+' else (left - right) if op == '-' else (left * right) if op == '*' else (left / right)
    elif op == 'in':
        return (tuple(left) in right) if isinstance(left, list) and isinstance(right[0], tuple) else (left in right)
    elif op in ('and', 'or'):
        return threevl.ThreeVL.andor(op, left, right)
    elif op in ('is not', 'is'):
        if right is not None:
            raise NotImplementedError('can null be on either side? what if neither value is null?')
        return (left is not None) if (op == 'is not') else (left is None)
    elif op == '@>':
        # todo: support a TextSearchDoc that will overload a lot of these operators
        if not all(isinstance(x, list) for x in (left, right)):
            raise TypeError('non-array-args', op, left, right)
        return set(left) >= set(right)
    elif op == '||':
        if not all(isinstance(x, list) for x in (left, right)):
            raise TypeError('non-array-args', op, left, right)
        return left + right
    elif op == '@@':
        if not all(isinstance(x, set) for x in (left, right)):
            raise TypeError('non_set_args', op, type(left), type(right))
        return bool(left & right)
    else:
        raise NotImplementedError(op, left, right)
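A quick usage sketch for the operator paths above that do not depend on the project-local threevl module (the comparison and and/or branches need that module, so they are left out here):

# Array containment, concatenation, overlap, membership, and arithmetic branches of evalop.
assert evalop('@>', [1, 2, 3], [2, 3]) is True    # left array contains right array
assert evalop('||', [1, 2], [3]) == [1, 2, 3]     # array concatenation
assert evalop('@@', {1, 2}, {2, 5}) is True       # text-search style overlap of two sets
assert evalop('in', 5, [1, 5, 9]) is True         # scalar membership
assert evalop('+', 2, 3) == 5                     # plain arithmetic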
def move_edge_source(self, edge_id, node_a, node_b):
    """Moves an edge originating from node_a so that it originates from node_b."""
    # Grab the edge
    edge = self.get_edge(edge_id)
    # Alter the vertices
    edge['vertices'] = (node_b, edge['vertices'][1])
    # Remove the edge from node_a
    node = self.get_node(node_a)
    node['edges'].remove(edge_id)
    # Add the edge to node_b
    node = self.get_node(node_b)
    node['edges'].append(edge_id)
def gen_matches(self, word):
    """Generate a sequence of possible completions for ``word``.

    :param word: the word to complete
    """
    if word.startswith("$"):
        for match in self.gen_variable_completions(word, os.environ):
            yield match
    else:
        head, tail = os.path.split(word)
        filenames = os.listdir(head or '.')
        completions = self.gen_filename_completions(tail, filenames)
        for match in completions:
            yield os.path.join(head, match)
    for extension in self.extensions:
        for match in extension(word):
            yield match
def get_function_from_settings(settings_key):
    """Gets a function from the string path defined in a settings file.

    Example:
        # my_app/my_file.py
        def some_function():
            # do something
            pass

        # settings.py
        SOME_FUNCTION = 'my_app.my_file.some_function'

        > get_function_from_settings('SOME_FUNCTION')
        <function my_app.my_file.some_function>
    """
    renderer_func_str = getattr(settings, settings_key, None)

    if not renderer_func_str:
        return None

    module_str, renderer_func_name = renderer_func_str.rsplit('.', 1)

    try:
        mod = importlib.import_module(module_str)
        return getattr(mod, renderer_func_name)
    except Exception:
        return None
def update_build_configuration_sets(self, id, **kwargs):
    """
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.update_build_configuration_sets(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: Product Version id (required)
    :param list[BuildConfigurationSetRest] body:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.update_build_configuration_sets_with_http_info(id, **kwargs)
    else:
        (data) = self.update_build_configuration_sets_with_http_info(id, **kwargs)
        return data
def divConn(self, preCellsTags, postCellsTags, connParam):
    from .. import sim
    ''' Generates connections between all pre and post-syn cells based on probability values'''

    if sim.cfg.verbose: print('Generating set of divergent connections (rule: %s) ...' % (connParam['label']))

    # get list of params that have a lambda function
    paramsStrFunc = [param for param in [p+'Func' for p in self.connStringFuncParams] if param in connParam]

    # copy the vars into args immediately and work out which keys are associated with lambda functions only once per method
    funcKeys = {}
    for paramStrFunc in paramsStrFunc:
        connParam[paramStrFunc + 'Args'] = connParam[paramStrFunc + 'Vars'].copy()
        funcKeys[paramStrFunc] = [key for key in connParam[paramStrFunc + 'Vars'] if callable(connParam[paramStrFunc + 'Vars'][key])]

    # converted to list only once
    postCellsTagsKeys = sorted(postCellsTags)

    # calculate hash for post cell gids
    hashPostCells = sim.hashList(postCellsTagsKeys)

    for preCellGid, preCellTags in preCellsTags.items():  # for each presyn cell
        divergence = connParam['divergenceFunc'][preCellGid] if 'divergenceFunc' in connParam else connParam['divergence']  # num of presyn conns / postsyn cell
        divergence = max(min(int(round(divergence)), len(postCellsTags)-1), 0)
        self.rand.Random123(hashPostCells, preCellGid, sim.cfg.seeds['conn'])  # init randomizer
        randSample = self.randUniqueInt(self.rand, divergence+1, 0, len(postCellsTags)-1)

        # note: randSample[divergence] is an extra value used only if one of the random postGids coincided with the preGid
        postCellsSample = {postCellsTagsKeys[randSample[divergence]] if postCellsTagsKeys[i] == preCellGid else postCellsTagsKeys[i]: 0
                           for i in randSample[0:divergence]}  # dict of selected gids of postsyn cells with removed pre gid

        for postCellGid in [c for c in postCellsSample if c in self.gid2lid]:
            postCellTags = postCellsTags[postCellGid]
            for paramStrFunc in paramsStrFunc:  # call lambda functions to get weight func args
                # update the relevant FuncArgs dict where lambda functions are known to exist in the corresponding FuncVars dict
                for funcKey in funcKeys[paramStrFunc]:
                    connParam[paramStrFunc + 'Args'][funcKey] = connParam[paramStrFunc + 'Vars'][funcKey](preCellTags, postCellTags)

            if preCellGid != postCellGid:  # if not self-connection
                self._addCellConn(connParam, preCellGid, postCellGid)
def _fetch_features(self):
    """ Retrieves a new page of features from Geopedia """
    if self.next_page_url is None:
        return

    response = get_json(self.next_page_url, post_values=self.query, headers=self.gpd_session.session_headers)

    self.features.extend(response['features'])
    self.next_page_url = response['pagination']['next']
    self.layer_size = response['pagination']['total']
def getDatastream(self, pid, dsID, asOfDateTime=None, validateChecksum=False):
    """Get information about a single datastream on a Fedora object; optionally,
    get information for the version of the datastream as of a particular date time.

    :param pid: object pid
    :param dsID: datastream id
    :param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
        so it can be converted to a date-time format Fedora can understand
    :param validateChecksum: boolean; if True, request Fedora to recalculate
        and verify the stored checksum against actual data
    :rtype: :class:`requests.models.Response`
    """
    # /objects/{pid}/datastreams/{dsID} ? [asOfDateTime] [format] [validateChecksum]
    http_args = {}
    if validateChecksum:
        # fedora only responds to lower-case validateChecksum option
        http_args['validateChecksum'] = str(validateChecksum).lower()
    if asOfDateTime:
        http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
    http_args.update(self.format_xml)
    uri = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
    return self.get(uri, params=http_args)
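A hedged usage sketch for the asOfDateTime requirement above: the datetime must be timezone-aware (non-naive). The repo object, pid, and dsID below are placeholders, not values from the source.

from datetime import datetime, timezone

as_of = datetime(2021, 6, 1, 12, 0, tzinfo=timezone.utc)   # non-naive, as the docstring requires
response = repo.getDatastream('demo:1', 'DC', asOfDateTime=as_of, validateChecksum=True)
print(response.status_code)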
def process_node(e):
    """
    Process a node element entry into a dict suitable for going into a
    Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    node = {'id': e['id'],
            'lat': e['lat'],
            'lon': e['lon']}

    if 'tags' in e:
        if e['tags'] is not np.nan:
            for t, v in list(e['tags'].items()):
                if t in config.settings.keep_osm_tags:
                    node[t] = v

    return node
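To make the mapping above concrete, here is a hypothetical parsed OSM node element and the row it would produce, assuming config.settings.keep_osm_tags includes 'amenity' but not 'name':

# Hypothetical Overpass JSON element, already parsed into a dict.
element = {'id': 42, 'lat': 48.8566, 'lon': 2.3522,
           'tags': {'amenity': 'cafe', 'name': 'Demo'}}
# process_node(element) would then return:
# {'id': 42, 'lat': 48.8566, 'lon': 2.3522, 'amenity': 'cafe'}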
def send_refactor_request(self, ref_type, ref_params, ref_options):
    """Send a refactor request to the Ensime server.

    The `ref_params` field will always have a field `type`.
    """
    request = {
        "typehint": ref_type,
        "procId": self.refactor_id,
        "params": ref_params
    }
    f = ref_params["file"]
    self.refactorings[self.refactor_id] = f
    self.refactor_id += 1
    request.update(ref_options)
    self.send_request(request)
def scan(self):
    """ Scan source and grab tokens. """
    self.pre_scan()

    token = None
    end = len(self.source)

    while self.pos < end:
        best_pat = None
        best_pat_len = 0

        # Check patterns
        for p, regexp in self.patterns:
            m = regexp.match(self.source, self.pos)
            if m:
                best_pat = p
                best_pat_len = len(m.group(0))
                break

        if best_pat is None:
            raise SyntaxError(
                "SyntaxError[@char {0}: {1}]".format(
                    self.pos,
                    "Bad token."))

        # Ignore patterns
        if best_pat in self.ignore:
            self.pos += best_pat_len
            continue

        # Create token
        token = (
            best_pat,
            self.source[self.pos:self.pos + best_pat_len],
            self.pos,
            self.pos + best_pat_len,
        )

        self.pos = token[-1]
        self.tokens.append(token)
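The scan() method above leans on a few attributes of its class; a minimal sketch of what they are assumed to look like (the concrete token names and regexes here are illustrative, not from the source):

import re

# Assumed instance attributes used by scan():
#   self.patterns - ordered list of (token_name, compiled_regex) pairs; first match wins
#   self.ignore   - collection of token names to skip (e.g. whitespace)
#   self.tokens   - output list of (name, text, start, end) tuples
#   self.source / self.pos - the input string and the current scan offset
patterns = [
    ('number', re.compile(r'\d+')),
    ('name', re.compile(r'[A-Za-z_]\w*')),
    ('space', re.compile(r'\s+')),
]
ignore = {'space'}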
def encode_many(chord_labels, reduce_extended_chords=False):
    """Translate a set of chord labels to numerical representations for sane
    evaluation.

    Parameters
    ----------
    chord_labels : list
        Set of chord labels to encode.
    reduce_extended_chords : bool
        Whether to map the upper voicings of extended chords (9's, 11's, 13's)
        to semitone extensions.
        (Default value = False)

    Returns
    -------
    root_number : np.ndarray, dtype=int
        Absolute semitone of the chord's root.
    interval_bitmap : np.ndarray, dtype=int
        12-dim vector of relative semitones in the given chord quality.
    bass_number : np.ndarray, dtype=int
        Relative semitones of the chord's bass notes.
    """
    num_items = len(chord_labels)
    roots, basses = np.zeros([2, num_items], dtype=np.int)
    semitones = np.zeros([num_items, 12], dtype=np.int)
    local_cache = dict()
    for i, label in enumerate(chord_labels):
        result = local_cache.get(label, None)
        if result is None:
            result = encode(label, reduce_extended_chords)
            local_cache[label] = result
        roots[i], semitones[i], basses[i] = result
    return roots, semitones, basses
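A hedged usage sketch, assuming encode() accepts Harte-style chord labels such as 'C:maj' and the no-chord symbol 'N' (the label grammar is not shown in this excerpt):

labels = ['C:maj', 'A:min', 'N']                     # assumed Harte-style syntax
roots, semitones, basses = encode_many(labels)
print(roots.shape, semitones.shape, basses.shape)    # (3,), (3, 12), (3,)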
def fromtextindex(index_or_dirname, indexname=None, docnum_field=None):
    """
    Extract all documents from a Whoosh index. E.g.::

        >>> import petl as etl
        >>> import os
        >>> # set up an index and load some documents via the Whoosh API
        ... from whoosh.index import create_in
        >>> from whoosh.fields import *
        >>> schema = Schema(title=TEXT(stored=True), path=ID(stored=True),
        ...                 content=TEXT)
        >>> dirname = 'example.whoosh'
        >>> if not os.path.exists(dirname):
        ...     os.mkdir(dirname)
        ...
        >>> index = create_in(dirname, schema)
        >>> writer = index.writer()
        >>> writer.add_document(title=u"First document", path=u"/a",
        ...                     content=u"This is the first document we've added!")
        >>> writer.add_document(title=u"Second document", path=u"/b",
        ...                     content=u"The second one is even more interesting!")
        >>> writer.commit()
        >>> # extract documents as a table
        ... table = etl.fromtextindex(dirname)
        >>> table
        +------+-------------------+
        | path | title             |
        +======+===================+
        | '/a' | 'First document'  |
        +------+-------------------+
        | '/b' | 'Second document' |
        +------+-------------------+

    Keyword arguments:

    index_or_dirname
        Either an instance of `whoosh.index.Index` or a string containing the
        directory path where the index is stored.
    indexname
        String containing the name of the index, if multiple indexes are
        stored in the same directory.
    docnum_field
        If not None, an extra field will be added to the output table
        containing the internal document number stored in the index. The name
        of the field will be the value of this argument.

    """
    return TextIndexView(index_or_dirname, indexname=indexname,
                         docnum_field=docnum_field)
def install():
    ''' Install weboob system-wide '''
    tmp_weboob_dir = '/tmp/weboob'

    # Check that the directory does not already exists
    while (os.path.exists(tmp_weboob_dir)):
        tmp_weboob_dir += '1'

    # Clone the repository
    print 'Fetching sources in temporary dir {}'.format(tmp_weboob_dir)
    result = cmd_exec('git clone {} {}'.format(WEBOOB_REPO, tmp_weboob_dir))
    if (result['error']):
        print result['stderr']
        print 'Weboob installation failed: could not clone repository'
        exit()
    print 'Sources fetched, will now process to installation'

    # Launch the installation
    result = cmd_exec('cd {} && ./setup.py install'.format(tmp_weboob_dir))

    # Remove the weboob directory
    shutil.rmtree(tmp_weboob_dir)

    if (result['error']):
        print result['stderr']
        print 'Weboob installation failed: setup failed'
        exit()

    print result['stdout']

    # Check weboob version
    weboob_version = get_weboob_version()
    if (not weboob_version):
        print 'Weboob installation failed: version not detected'
        exit()
    print 'Weboob (version: {}) installation succeeded'.format(weboob_version)

    update()
def get_automations(self, refresh=False, generic_type=None):
    """Get all automations."""
    if refresh or self._automations is None:
        if self._automations is None:
            # Set up the device libraries
            self._automations = {}

        _LOGGER.info("Updating all automations...")
        response = self.send_request("get", CONST.AUTOMATION_URL)
        response_object = json.loads(response.text)

        if (response_object and
                not isinstance(response_object, (tuple, list))):
            response_object = [response_object]

        _LOGGER.debug("Get Automations Response: %s", response.text)

        for automation_json in response_object:
            # Attempt to reuse an existing automation object
            automation = self._automations.get(str(automation_json['id']))

            # No existing automation, create a new one
            if automation:
                automation.update(automation_json)
            else:
                automation = AbodeAutomation(self, automation_json)
                self._automations[automation.automation_id] = automation

    if generic_type:
        automations = []
        for automation in self._automations.values():
            if (automation.generic_type is not None and
                    automation.generic_type in generic_type):
                automations.append(automation)
        return automations

    return list(self._automations.values())
def edit_permissions(self):
    """Creates the view used to edit permissions.

    To create the view, data in the following format is passed to the UI
    in the objects field:

    .. code-block:: python

        {
            "type": "tree-toggle",
            "action": "set_permission",
            "tree": [
                {
                    "checked": true,
                    "name": "Workflow 1 Name",
                    "id": "workflow1",
                    "children": [
                        {
                            "checked": true,
                            "name": "Task 1 Name",
                            "id": "workflow1..task1",
                            "children": []
                        },
                        {
                            "checked": false,
                            "id": "workflow1..task2",
                            "name": "Task 2 Name",
                            "children": []
                        }
                    ]
                },
                {
                    "checked": true,
                    "name": "Workflow 2 Name",
                    "id": "workflow2",
                    "children": [
                        {
                            "checked": true,
                            "name": "Workflow 2 Lane 1 Name",
                            "id": "workflow2.lane1",
                            "children": [
                                {
                                    "checked": true,
                                    "name": "Workflow 2 Task 1 Name",
                                    "id": "workflow2.lane1.task1",
                                    "children": []
                                },
                                {
                                    "checked": false,
                                    "name": "Workflow 2 Task 2 Name",
                                    "id": "workflow2.lane1.task2",
                                    "children": []
                                }
                            ]
                        }
                    ]
                }
            ]
        }

    "type" field denotes that the object is a tree view which has elements that can be toggled.
    "action" field is the
    "name" field is the human readable name.
    "id" field is used to make requests to the backend.
    "checked" field shows whether the role has the permission or not.
    "children" field is the sub-permissions of the permission.
    """
    # Get the role that was selected in the CRUD view
    key = self.current.input['object_id']
    self.current.task_data['role_id'] = key
    role = RoleModel.objects.get(key=key)
    # Get the cached permission tree, or build a new one if there is none cached
    # TODO: Add an extra view in case there was no cache, as in 'please wait calculating permissions'
    permission_tree = self._permission_trees(PermissionModel.objects)
    # Apply the selected role to the permission tree, setting the 'checked' field
    # of the permission the role has
    role_tree = self._apply_role_tree(permission_tree, role)
    # Apply final formatting, and output the tree to the UI
    self.output['objects'] = [
        {
            'type': 'tree-toggle',
            'action': 'apply_change',
            'trees': self._format_tree_output(role_tree),
        },
    ]
    self.form_out(PermissionForm())
def get_transform(self, map_from='visual', map_to='render'):
    """Return a transform mapping between any two coordinate systems.

    Parameters
    ----------
    map_from : str
        The starting coordinate system to map from. Must be one of: visual,
        scene, document, canvas, framebuffer, or render.
    map_to : str
        The ending coordinate system to map to. Must be one of: visual,
        scene, document, canvas, framebuffer, or render.
    """
    tr = ['visual', 'scene', 'document', 'canvas', 'framebuffer', 'render']
    ifrom = tr.index(map_from)
    ito = tr.index(map_to)
    if ifrom < ito:
        trs = [getattr(self, '_' + t + '_transform')
               for t in tr[ifrom:ito]][::-1]
    else:
        trs = [getattr(self, '_' + t + '_transform').inverse
               for t in tr[ito:ifrom]]
    return self._cache.get(trs)
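A hedged usage sketch: assuming the returned object behaves like a typical scene-graph transform chain with a map() method, mapping a visual-space point into canvas coordinates would look like this (transform_system is a placeholder for the instance exposing get_transform):

# Hypothetical: map a point from visual coordinates to canvas coordinates.
tr = transform_system.get_transform('visual', 'canvas')
canvas_xy = tr.map([10.0, 20.0])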
def emit(self, signal, value=None, gather=False):
    """Emits a signal, causing all slot methods connected with the signal to be called
    (optionally w/ related value)

    signal: the name of the signal to emit, must be defined in the classes 'signals' list.
    value: the value to pass to all connected slot methods.
    gather: if set, causes emit to return a list of all slot results
    """
    results = [] if gather else True
    if hasattr(self, 'connections') and signal in self.connections:
        for condition, values in self.connections[signal].items():
            if condition is None or condition == value or (callable(condition) and condition(value)):
                for slot, transform in values.items():
                    if transform is not None:
                        if callable(transform):
                            used_value = transform(value)
                        elif isinstance(transform, str):
                            used_value = transform.format(value=value)
                        else:
                            used_value = transform
                    else:
                        used_value = value

                    if used_value is not None:
                        if(accept_arguments(slot, 1)):
                            result = slot(used_value)
                        elif(accept_arguments(slot, 0)):
                            result = slot()
                        else:
                            result = ''
                    else:
                        result = slot()

                    if gather:
                        results.append(result)

    return results
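The loop above implies a particular nesting for self.connections; a sketch of that assumed shape (the signal name, slots, and transforms here are purely illustrative):

# self.connections:  signal -> condition -> slot -> transform
# condition: None (always fire), a literal compared with ==, or a predicate callable
# transform: None (pass value through), a callable, a '{value}' format string, or a constant
def some_reset_handler():                            # hypothetical zero-argument slot
    print('value was reset')

connections = {
    'value_changed': {
        None: {print: 'value is now {value}'},       # always fires; formats the value first
        0: {some_reset_handler: None},               # fires only when the emitted value == 0
    },
}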
def _validate_plan_base(
    new_plan,
    base_plan,
    is_partition_subset=True,
    allow_rf_change=False,
):
    """Validate if given plan is valid comparing with given base-plan.

    Validate following assertions:
    - Partition-check: New partition-set should be subset of base-partition set
    - Replica-count check: Replication-factor for each partition remains same
    - Broker-check: New broker-set should be subset of base broker-set
    """
    # Verify that partitions in plan are subset of base plan.
    new_partitions = set([
        (p_data['topic'], p_data['partition'])
        for p_data in new_plan['partitions']
    ])
    base_partitions = set([
        (p_data['topic'], p_data['partition'])
        for p_data in base_plan['partitions']
    ])
    if is_partition_subset:
        invalid_partitions = list(new_partitions - base_partitions)
    else:
        # partition set should be equal
        invalid_partitions = list(
            new_partitions.union(base_partitions) -
            new_partitions.intersection(base_partitions),
        )
    if invalid_partitions:
        _log.error(
            'Invalid partition(s) found: {p_list}'.format(
                p_list=invalid_partitions,
            )
        )
        return False

    # Verify replication-factor remains consistent
    base_partition_replicas = {
        (p_data['topic'], p_data['partition']): p_data['replicas']
        for p_data in base_plan['partitions']
    }
    new_partition_replicas = {
        (p_data['topic'], p_data['partition']): p_data['replicas']
        for p_data in new_plan['partitions']
    }
    if not allow_rf_change:
        invalid_replication_factor = False
        for new_partition, replicas in six.iteritems(new_partition_replicas):
            base_replica_cnt = len(base_partition_replicas[new_partition])
            if len(replicas) != base_replica_cnt:
                invalid_replication_factor = True
                _log.error(
                    'Replication-factor Mismatch: Partition: {partition}: '
                    'Base-replicas: {expected}, Proposed-replicas: {actual}'
                    .format(
                        partition=new_partition,
                        expected=base_partition_replicas[new_partition],
                        actual=replicas,
                    ),
                )
        if invalid_replication_factor:
            return False

    # Validation successful
    return True
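For reference, a minimal pair of plans in the shape the checks above expect; with identical partition sets and matching replication factors this would validate, while dropping a replica from new_plan would trip the replication-factor check (the topic names and broker ids are illustrative):

base_plan = {'partitions': [
    {'topic': 'payments', 'partition': 0, 'replicas': [1, 2]},
    {'topic': 'payments', 'partition': 1, 'replicas': [2, 3]},
]}
new_plan = {'partitions': [
    {'topic': 'payments', 'partition': 0, 'replicas': [2, 3]},
    {'topic': 'payments', 'partition': 1, 'replicas': [3, 1]},
]}
# _validate_plan_base(new_plan, base_plan) -> True (same partitions, same replica counts)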
def add_efac(psr, efac=1.0, flagid=None, flags=None, seed=None):
    """Add nominal TOA errors, multiplied by `efac` factor.
    Optionally take a pseudorandom-number-generator seed."""

    if seed is not None:
        N.random.seed(seed)

    # default efacvec
    efacvec = N.ones(psr.nobs)

    # check that efac is scalar if flags is None
    if flags is None:
        if not N.isscalar(efac):
            raise ValueError('ERROR: If flags is None, efac must be a scalar')
        else:
            efacvec = N.ones(psr.nobs) * efac

    if flags is not None and flagid is not None and not N.isscalar(efac):
        if len(efac) == len(flags):
            for ct, flag in enumerate(flags):
                ind = flag == N.array(psr.flagvals(flagid))
                efacvec[ind] = efac[ct]

    psr.stoas[:] += efacvec * psr.toaerrs * (1e-6 / day) * N.random.randn(psr.nobs)
def __create_tcp_top(self, packet):
    """ Wrap the complete packet with the TCP top header. """
    length = len(packet)
    top = pack('<HHI', const.MACHINE_PREPARE_DATA_1, const.MACHINE_PREPARE_DATA_2, length)
    return top + packet
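For reference, the '<HHI' format string above produces an 8-byte little-endian header: two 16-bit magic words followed by a 32-bit payload length.

from struct import calcsize
assert calcsize('<HHI') == 8   # 2 + 2 + 4 bytes, little-endian, no padding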
def render_template(self, template, **kwargs):
    """
    Use this method on your own endpoints, will pass the extra_args to the templates.

    :param template: The template relative path
    :param kwargs: arguments to be passed to the template
    """
    kwargs["base_template"] = self.appbuilder.base_template
    kwargs["appbuilder"] = self.appbuilder
    return render_template(
        template, **dict(list(kwargs.items()) + list(self.extra_args.items()))
    )
def set_seamless_mode(self, enabled): """Enables or disables seamless guest display rendering (seamless desktop integration) mode. Calling this method has no effect if :py:func:`IGuest.get_facility_status` with facility @c Seamless does not return @c Active. in enabled of type bool """ if not isinstance(enabled, bool): raise TypeError("enabled can only be an instance of type bool") self._call("setSeamlessMode", in_p=[enabled])
Enables or disables seamless guest display rendering (seamless desktop integration) mode. Calling this method has no effect if :py:func:`IGuest.get_facility_status` with facility @c Seamless does not return @c Active. in enabled of type bool
Below is the the instruction that describes the task: ### Input: Enables or disables seamless guest display rendering (seamless desktop integration) mode. Calling this method has no effect if :py:func:`IGuest.get_facility_status` with facility @c Seamless does not return @c Active. in enabled of type bool ### Response: def set_seamless_mode(self, enabled): """Enables or disables seamless guest display rendering (seamless desktop integration) mode. Calling this method has no effect if :py:func:`IGuest.get_facility_status` with facility @c Seamless does not return @c Active. in enabled of type bool """ if not isinstance(enabled, bool): raise TypeError("enabled can only be an instance of type bool") self._call("setSeamlessMode", in_p=[enabled])
def _parse_api_value_list(self, values): """Return a list field compatible with the API.""" try: return [v.to_api() for v in values] # Not models except AttributeError: return list(values)
Return a list field compatible with the API.
Below is the the instruction that describes the task: ### Input: Return a list field compatible with the API. ### Response: def _parse_api_value_list(self, values): """Return a list field compatible with the API.""" try: return [v.to_api() for v in values] # Not models except AttributeError: return list(values)
def remove(self, member): """Remove a member from the archive.""" # Make sure we have an info object if isinstance(member, ZipInfo): # 'member' is already an info object zinfo = member else: # Get info object for name zinfo = self.getinfo(member) # compute the location of the file data in the local file header, # by adding the lengths of the records before it zlen = len(zinfo.FileHeader()) + zinfo.compress_size fileidx = self.filelist.index(zinfo) fileofs = sum( [len(self.filelist[f].FileHeader()) + self.filelist[f].compress_size for f in xrange(0, fileidx)] ) self.fp.seek(fileofs + zlen) after = self.fp.read() self.fp.seek(fileofs) self.fp.write(after) self.fp.seek(-zlen, 2) self.fp.truncate() self._didModify = True self.filelist.remove(zinfo) del self.NameToInfo[member]
Remove a member from the archive.
Below is the the instruction that describes the task: ### Input: Remove a member from the archive. ### Response: def remove(self, member): """Remove a member from the archive.""" # Make sure we have an info object if isinstance(member, ZipInfo): # 'member' is already an info object zinfo = member else: # Get info object for name zinfo = self.getinfo(member) # compute the location of the file data in the local file header, # by adding the lengths of the records before it zlen = len(zinfo.FileHeader()) + zinfo.compress_size fileidx = self.filelist.index(zinfo) fileofs = sum( [len(self.filelist[f].FileHeader()) + self.filelist[f].compress_size for f in xrange(0, fileidx)] ) self.fp.seek(fileofs + zlen) after = self.fp.read() self.fp.seek(fileofs) self.fp.write(after) self.fp.seek(-zlen, 2) self.fp.truncate() self._didModify = True self.filelist.remove(zinfo) del self.NameToInfo[member]
def _remove_empty_pars(pars, pars_oi, dims_oi): """ Remove parameters that are actually empty. For example, the parameter y would be removed with the following model code: transformed data { int n; n <- 0; } parameters { real y[n]; } Parameters ---------- pars: iterable of str pars_oi: list of str dims_oi: list of list of int Returns ------- pars_trimmed: list of str """ pars = list(pars) for par, dim in zip(pars_oi, dims_oi): if par in pars and np.prod(dim) == 0: del pars[pars.index(par)] return pars
Remove parameters that are actually empty. For example, the parameter y would be removed with the following model code: transformed data { int n; n <- 0; } parameters { real y[n]; } Parameters ---------- pars: iterable of str pars_oi: list of str dims_oi: list of list of int Returns ------- pars_trimmed: list of str
Below is the the instruction that describes the task: ### Input: Remove parameters that are actually empty. For example, the parameter y would be removed with the following model code: transformed data { int n; n <- 0; } parameters { real y[n]; } Parameters ---------- pars: iterable of str pars_oi: list of str dims_oi: list of list of int Returns ------- pars_trimmed: list of str ### Response: def _remove_empty_pars(pars, pars_oi, dims_oi): """ Remove parameters that are actually empty. For example, the parameter y would be removed with the following model code: transformed data { int n; n <- 0; } parameters { real y[n]; } Parameters ---------- pars: iterable of str pars_oi: list of str dims_oi: list of list of int Returns ------- pars_trimmed: list of str """ pars = list(pars) for par, dim in zip(pars_oi, dims_oi): if par in pars and np.prod(dim) == 0: del pars[pars.index(par)] return pars
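For example, with one zero-length parameter the function drops it and keeps the rest (a small runnable check, assuming _remove_empty_pars is defined as above):

import numpy as np

pars_oi = ["y", "mu", "lp__"]
dims_oi = [[0], [3], []]   # "y" has zero elements, "lp__" is a scalar

print(_remove_empty_pars(["y", "mu", "lp__"], pars_oi, dims_oi))
# ['mu', 'lp__']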
def heightmap_lerp_hm( hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray, coef: float ) -> None: """Perform linear interpolation between two heightmaps storing the result in ``hm3``. This is the same as doing ``hm3[:] = hm1[:] + (hm2[:] - hm1[:]) * coef`` Args: hm1 (numpy.ndarray): The first heightmap. hm2 (numpy.ndarray): The second heightmap to add to the first. hm3 (numpy.ndarray): A destination heightmap to store the result. coef (float): The linear interpolation coefficient. """ lib.TCOD_heightmap_lerp_hm( _heightmap_cdata(hm1), _heightmap_cdata(hm2), _heightmap_cdata(hm3), coef, )
Perform linear interpolation between two heightmaps storing the result in ``hm3``. This is the same as doing ``hm3[:] = hm1[:] + (hm2[:] - hm1[:]) * coef`` Args: hm1 (numpy.ndarray): The first heightmap. hm2 (numpy.ndarray): The second heightmap to add to the first. hm3 (numpy.ndarray): A destination heightmap to store the result. coef (float): The linear interpolation coefficient.
Below is the the instruction that describes the task: ### Input: Perform linear interpolation between two heightmaps storing the result in ``hm3``. This is the same as doing ``hm3[:] = hm1[:] + (hm2[:] - hm1[:]) * coef`` Args: hm1 (numpy.ndarray): The first heightmap. hm2 (numpy.ndarray): The second heightmap to add to the first. hm3 (numpy.ndarray): A destination heightmap to store the result. coef (float): The linear interpolation coefficient. ### Response: def heightmap_lerp_hm( hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray, coef: float ) -> None: """Perform linear interpolation between two heightmaps storing the result in ``hm3``. This is the same as doing ``hm3[:] = hm1[:] + (hm2[:] - hm1[:]) * coef`` Args: hm1 (numpy.ndarray): The first heightmap. hm2 (numpy.ndarray): The second heightmap to add to the first. hm3 (numpy.ndarray): A destination heightmap to store the result. coef (float): The linear interpolation coefficient. """ lib.TCOD_heightmap_lerp_hm( _heightmap_cdata(hm1), _heightmap_cdata(hm2), _heightmap_cdata(hm3), coef, )
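The equivalence stated in the docstring, written out as a plain NumPy sketch (illustrative only, no libtcod call):

import numpy as np

hm1 = np.zeros((8, 8), dtype=np.float32)
hm2 = np.ones((8, 8), dtype=np.float32)
hm3 = np.empty_like(hm1)

coef = 0.25
hm3[:] = hm1 + (hm2 - hm1) * coef   # every cell becomes 0.25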
def sell_close(id_or_ins, amount, price=None, style=None, close_today=False): """ Close a long position by selling. :param id_or_ins: the instrument to place the order for :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: number of lots to order :param float price: order price; defaults to None, which means a :class:`~MarketOrder`. This parameter mainly exists to simplify the `style` parameter. :param style: order type; defaults to a market order. Currently supported order types are :class:`~LimitOrder` and :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: whether to send a close-today order; defaults to False, which sends a regular close order :return: :class:`~Order` object | list[:class:`~Order`] | None """ position_effect = POSITION_EFFECT.CLOSE_TODAY if close_today else POSITION_EFFECT.CLOSE return order(id_or_ins, amount, SIDE.SELL, position_effect, cal_style(price, style))
Close a long position by selling. :param id_or_ins: the instrument to place the order for :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: number of lots to order :param float price: order price; defaults to None, which means a :class:`~MarketOrder`. This parameter mainly exists to simplify the `style` parameter. :param style: order type; defaults to a market order. Currently supported order types are :class:`~LimitOrder` and :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: whether to send a close-today order; defaults to False, which sends a regular close order :return: :class:`~Order` object | list[:class:`~Order`] | None
Below is the the instruction that describes the task: ### Input: Close a long position by selling. :param id_or_ins: the instrument to place the order for :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: number of lots to order :param float price: order price; defaults to None, which means a :class:`~MarketOrder`. This parameter mainly exists to simplify the `style` parameter. :param style: order type; defaults to a market order. Currently supported order types are :class:`~LimitOrder` and :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: whether to send a close-today order; defaults to False, which sends a regular close order :return: :class:`~Order` object | list[:class:`~Order`] | None ### Response: def sell_close(id_or_ins, amount, price=None, style=None, close_today=False): """ Close a long position by selling. :param id_or_ins: the instrument to place the order for :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: number of lots to order :param float price: order price; defaults to None, which means a :class:`~MarketOrder`. This parameter mainly exists to simplify the `style` parameter. :param style: order type; defaults to a market order. Currently supported order types are :class:`~LimitOrder` and :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: whether to send a close-today order; defaults to False, which sends a regular close order :return: :class:`~Order` object | list[:class:`~Order`] | None """ position_effect = POSITION_EFFECT.CLOSE_TODAY if close_today else POSITION_EFFECT.CLOSE return order(id_or_ins, amount, SIDE.SELL, position_effect, cal_style(price, style))
def _new_import(self, import_name): """ Starts a new import. Args: import_name(str): A relative import in the dot syntax (e.g. "first.second.expressions") """ # Import can't be used if meta-model is loaded from string assert self.root_path is not None, \ '"import" statement can not be used if meta-model is ' \ 'loaded from string.' # Find the absolute file name of the import based on the relative # import_name and current namespace current_namespace = self._namespace_stack[-1] if '.' in current_namespace: root_namespace = current_namespace.rsplit('.', 1)[0] import_name = "%s.%s" % (root_namespace, import_name) import_file_name = "%s.tx" % os.path.join(self.root_path, *import_name.split(".")) if import_name not in self.namespaces: self._enter_namespace(import_name) if self.debug: self.dprint("*** IMPORTING FILE: %s" % import_file_name) metamodel_from_file(import_file_name, metamodel=self) self._leave_namespace() # Add the import to the imported_namespaces for current namespace # so that resolving of current grammar searches imported grammars # in the order of import self._imported_namespaces[current_namespace].append( self.namespaces[import_name])
Starts a new import. Args: import_name(str): A relative import in the dot syntax (e.g. "first.second.expressions")
Below is the the instruction that describes the task: ### Input: Starts a new import. Args: import_name(str): A relative import in the dot syntax (e.g. "first.second.expressions") ### Response: def _new_import(self, import_name): """ Starts a new import. Args: import_name(str): A relative import in the dot syntax (e.g. "first.second.expressions") """ # Import can't be used if meta-model is loaded from string assert self.root_path is not None, \ '"import" statement can not be used if meta-model is ' \ 'loaded from string.' # Find the absolute file name of the import based on the relative # import_name and current namespace current_namespace = self._namespace_stack[-1] if '.' in current_namespace: root_namespace = current_namespace.rsplit('.', 1)[0] import_name = "%s.%s" % (root_namespace, import_name) import_file_name = "%s.tx" % os.path.join(self.root_path, *import_name.split(".")) if import_name not in self.namespaces: self._enter_namespace(import_name) if self.debug: self.dprint("*** IMPORTING FILE: %s" % import_file_name) metamodel_from_file(import_file_name, metamodel=self) self._leave_namespace() # Add the import to the imported_namespaces for current namespace # so that resolving of current grammar searches imported grammars # in the order of import self._imported_namespaces[current_namespace].append( self.namespaces[import_name])
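How the relative import name is mapped to a grammar file path, shown with made-up values for the metamodel root and the import name:

import os

root_path = "/project/grammars"          # hypothetical metamodel root
import_name = "first.second.expressions"

import_file_name = "%s.tx" % os.path.join(root_path, *import_name.split("."))
print(import_file_name)   # /project/grammars/first/second/expressions.tx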
def _raw(s): """Get raw representation of s, truncating if too long.""" if isinstance(s, list): s = "\n".join(_raw(item) for item in s) if s == EOF: return "EOF" s = repr(s) # Get raw representation of string s = s[1:-1] # Strip away quotation marks if len(s) > 15: s = s[:15] + "..." # Truncate if too long return s
Get raw representation of s, truncating if too long.
Below is the the instruction that describes the task: ### Input: Get raw representation of s, truncating if too long. ### Response: def _raw(s): """Get raw representation of s, truncating if too long.""" if isinstance(s, list): s = "\n".join(_raw(item) for item in s) if s == EOF: return "EOF" s = repr(s) # Get raw representation of string s = s[1:-1] # Strip away quotation marks if len(s) > 15: s = s[:15] + "..." # Truncate if too long return s
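Sample outputs for short and long inputs. EOF refers to a module-level sentinel that is not part of the snippet, so a stand-in is defined here to make the sketch runnable:

EOF = object()   # stand-in for the module's real EOF sentinel (assumption)

print(_raw("short"))                          # short
print(_raw("a string that is rather long"))   # a string that i...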
def bufferoutput(self): """ Buffer the whole output until write EOF or flushed. """ new_stream = Stream(writebufferlimit=None) if self._sendHeaders: # An extra copy self.container.subroutine(new_stream.copy_to(self.outputstream, self.container, buffering=False)) self.outputstream = Stream(writebufferlimit=None)
Buffer the whole output until write EOF or flushed.
Below is the the instruction that describes the task: ### Input: Buffer the whole output until write EOF or flushed. ### Response: def bufferoutput(self): """ Buffer the whole output until write EOF or flushed. """ new_stream = Stream(writebufferlimit=None) if self._sendHeaders: # An extra copy self.container.subroutine(new_stream.copy_to(self.outputstream, self.container, buffering=False)) self.outputstream = Stream(writebufferlimit=None)
def merkle(hashes, func=_merkle_hash256): """Convert an iterable of hashes or hashable objects into a binary tree, construct the interior values using a passed-in constructor or compression function, and return the root value of the tree. The default compressor is the hash256 function, resulting in root-hash for the entire tree.""" # We use append to duplicate the final item in the iterable of hashes, so # we need hashes to be a list-like object, regardless of what we were # passed. hashes = list(iter(hashes)) # If the passed-in iterable is empty, allow the constructor to choose our # return value: if not hashes: return func() # We must make sure the constructor/compressor is called for the case of # a single item as well, in which case the loop below is not entered. if len(hashes) == 1: return func(*hashes) # Build up successive layers of the binary hash tree, starting from the # bottom. We've reached the root node when the list has been reduced to # one element. while len(hashes) > 1: # For reasons lost to time, Satoshi decided that any traversal though # a bitcoin hash tree will have the same number steps. This is because # the last element is repeated when there is an odd number of elements # in level, resulting in the right portion of the binary tree being # extended into a full tower. hashes.append(hashes[-1]) # By creating an iterator and then duplicating it, we cause two items # to be pulled out of the hashes array each time through the generator. # The last element is ignored if there is an odd number of elements # (meaning there was originally an even number, because of the append # operation above). hashes = list(func(l,r) for l,r in zip(*(iter(hashes),)*2)) # Return the root node of the Merkle tree to the caller. return hashes[0]
Convert an iterable of hashes or hashable objects into a binary tree, construct the interior values using a passed-in constructor or compression function, and return the root value of the tree. The default compressor is the hash256 function, resulting in root-hash for the entire tree.
Below is the the instruction that describes the task: ### Input: Convert an iterable of hashes or hashable objects into a binary tree, construct the interior values using a passed-in constructor or compression function, and return the root value of the tree. The default compressor is the hash256 function, resulting in root-hash for the entire tree. ### Response: def merkle(hashes, func=_merkle_hash256): """Convert an iterable of hashes or hashable objects into a binary tree, construct the interior values using a passed-in constructor or compression function, and return the root value of the tree. The default compressor is the hash256 function, resulting in root-hash for the entire tree.""" # We use append to duplicate the final item in the iterable of hashes, so # we need hashes to be a list-like object, regardless of what we were # passed. hashes = list(iter(hashes)) # If the passed-in iterable is empty, allow the constructor to choose our # return value: if not hashes: return func() # We must make sure the constructor/compressor is called for the case of # a single item as well, in which case the loop below is not entered. if len(hashes) == 1: return func(*hashes) # Build up successive layers of the binary hash tree, starting from the # bottom. We've reached the root node when the list has been reduced to # one element. while len(hashes) > 1: # For reasons lost to time, Satoshi decided that any traversal though # a bitcoin hash tree will have the same number steps. This is because # the last element is repeated when there is an odd number of elements # in level, resulting in the right portion of the binary tree being # extended into a full tower. hashes.append(hashes[-1]) # By creating an iterator and then duplicating it, we cause two items # to be pulled out of the hashes array each time through the generator. # The last element is ignored if there is an odd number of elements # (meaning there was originally an even number, because of the append # operation above). hashes = list(func(l,r) for l,r in zip(*(iter(hashes),)*2)) # Return the root node of the Merkle tree to the caller. return hashes[0]
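A self-contained usage sketch that passes its own double-SHA-256 compressor. The default _merkle_hash256 is not shown above, so this compressor is an assumption about its behaviour rather than a reproduction of it:

import hashlib

def hash256_node(*parts):
    # Double SHA-256 over the concatenated children; also covers the
    # zero-argument (empty tree) and one-argument (single leaf) cases.
    return hashlib.sha256(hashlib.sha256(b"".join(parts)).digest()).digest()

leaves = [hashlib.sha256(bytes([i])).digest() for i in range(5)]
root = merkle(leaves, func=hash256_node)
print(root.hex())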
def get_environment(self): ''' Return a dictionary representing the environment variables used to set the proxy settings. ''' env = {} if self.http: env['http_proxy'] = env['HTTP_PROXY'] = self.http if self.https: env['https_proxy'] = env['HTTPS_PROXY'] = self.https if self.ftp: env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp if self.no_proxy: env['no_proxy'] = env['NO_PROXY'] = self.no_proxy return env
Return a dictionary representing the environment variables used to set the proxy settings.
Below is the the instruction that describes the task: ### Input: Return a dictionary representing the environment variables used to set the proxy settings. ### Response: def get_environment(self): ''' Return a dictionary representing the environment variables used to set the proxy settings. ''' env = {} if self.http: env['http_proxy'] = env['HTTP_PROXY'] = self.http if self.https: env['https_proxy'] = env['HTTPS_PROXY'] = self.https if self.ftp: env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp if self.no_proxy: env['no_proxy'] = env['NO_PROXY'] = self.no_proxy return env
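An illustrative call with a duck-typed stand-in for the proxy-settings object; this only works because the def is shown at module level here, whereas in the library it is a method of the proxy configuration class:

from types import SimpleNamespace

proxy = SimpleNamespace(http="http://proxy:3128", https=None, ftp=None,
                        no_proxy="localhost,127.0.0.1")
print(get_environment(proxy))
# {'http_proxy': 'http://proxy:3128', 'HTTP_PROXY': 'http://proxy:3128',
#  'no_proxy': 'localhost,127.0.0.1', 'NO_PROXY': 'localhost,127.0.0.1'}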
def _get_window_labels(self, window): """Returns the mapping between sliding window points and their contigs, and the x-axis position of contig Parameters ---------- window : int Size of the window. Returns ------- xbars : list The x-axis position of the ending for each contig. labels : list The x-axis labels for each data point in the sliding window """ # Get summary stats, if they have not yet been triggered if not self.summary_info: self.get_summary_stats() # Get contig boundary positon c = 0 xbars = [] for contig, seq in self.contigs.items(): contig_id = self._get_contig_id(contig) self.contig_boundaries[contig_id] = [c, c + len(seq)] c += len(seq) xbars.append((contig_id, c, contig)) return xbars
Returns the mapping between sliding window points and their contigs, and the x-axis position of contig Parameters ---------- window : int Size of the window. Returns ------- xbars : list The x-axis position of the ending for each contig. labels : list The x-axis labels for each data point in the sliding window
Below is the the instruction that describes the task: ### Input: Returns the mapping between sliding window points and their contigs, and the x-axis position of contig Parameters ---------- window : int Size of the window. Returns ------- xbars : list The x-axis position of the ending for each contig. labels : list The x-axis labels for each data point in the sliding window ### Response: def _get_window_labels(self, window): """Returns the mapping between sliding window points and their contigs, and the x-axis position of contig Parameters ---------- window : int Size of the window. Returns ------- xbars : list The x-axis position of the ending for each contig. labels : list The x-axis labels for each data point in the sliding window """ # Get summary stats, if they have not yet been triggered if not self.summary_info: self.get_summary_stats() # Get contig boundary positon c = 0 xbars = [] for contig, seq in self.contigs.items(): contig_id = self._get_contig_id(contig) self.contig_boundaries[contig_id] = [c, c + len(seq)] c += len(seq) xbars.append((contig_id, c, contig)) return xbars
def _get(self, url, method, host): """Get a request handler based on the URL of the request, or raises an error. Internal method for caching. :param url: request URL :param method: request method :return: handler, arguments, keyword arguments """ url = unquote(host + url) # Check against known static routes route = self.routes_static.get(url) method_not_supported = MethodNotSupported( "Method {} not allowed for URL {}".format(method, url), method=method, allowed_methods=self.get_supported_methods(url), ) if route: if route.methods and method not in route.methods: raise method_not_supported match = route.pattern.match(url) else: route_found = False # Move on to testing all regex routes for route in self.routes_dynamic[url_hash(url)]: match = route.pattern.match(url) route_found |= match is not None # Do early method checking if match and method in route.methods: break else: # Lastly, check against all regex routes that cannot be hashed for route in self.routes_always_check: match = route.pattern.match(url) route_found |= match is not None # Do early method checking if match and method in route.methods: break else: # Route was found but the methods didn't match if route_found: raise method_not_supported raise NotFound("Requested URL {} not found".format(url)) kwargs = { p.name: p.cast(value) for value, p in zip(match.groups(1), route.parameters) } route_handler = route.handler if hasattr(route_handler, "handlers"): route_handler = route_handler.handlers[method] return route_handler, [], kwargs, route.uri
Get a request handler based on the URL of the request, or raises an error. Internal method for caching. :param url: request URL :param method: request method :return: handler, arguments, keyword arguments
Below is the the instruction that describes the task: ### Input: Get a request handler based on the URL of the request, or raises an error. Internal method for caching. :param url: request URL :param method: request method :return: handler, arguments, keyword arguments ### Response: def _get(self, url, method, host): """Get a request handler based on the URL of the request, or raises an error. Internal method for caching. :param url: request URL :param method: request method :return: handler, arguments, keyword arguments """ url = unquote(host + url) # Check against known static routes route = self.routes_static.get(url) method_not_supported = MethodNotSupported( "Method {} not allowed for URL {}".format(method, url), method=method, allowed_methods=self.get_supported_methods(url), ) if route: if route.methods and method not in route.methods: raise method_not_supported match = route.pattern.match(url) else: route_found = False # Move on to testing all regex routes for route in self.routes_dynamic[url_hash(url)]: match = route.pattern.match(url) route_found |= match is not None # Do early method checking if match and method in route.methods: break else: # Lastly, check against all regex routes that cannot be hashed for route in self.routes_always_check: match = route.pattern.match(url) route_found |= match is not None # Do early method checking if match and method in route.methods: break else: # Route was found but the methods didn't match if route_found: raise method_not_supported raise NotFound("Requested URL {} not found".format(url)) kwargs = { p.name: p.cast(value) for value, p in zip(match.groups(1), route.parameters) } route_handler = route.handler if hasattr(route_handler, "handlers"): route_handler = route_handler.handlers[method] return route_handler, [], kwargs, route.uri
def setup(self, environ): '''Called once to setup the list of wsgi middleware.''' json_handler = Root().putSubHandler('calc', Calculator()) middleware = wsgi.Router('/', post=json_handler, accept_content_types=JSON_CONTENT_TYPES) response = [wsgi.GZipMiddleware(200)] return wsgi.WsgiHandler(middleware=[wsgi.wait_for_body_middleware, middleware], response_middleware=response)
Called once to setup the list of wsgi middleware.
Below is the the instruction that describes the task: ### Input: Called once to setup the list of wsgi middleware. ### Response: def setup(self, environ): '''Called once to setup the list of wsgi middleware.''' json_handler = Root().putSubHandler('calc', Calculator()) middleware = wsgi.Router('/', post=json_handler, accept_content_types=JSON_CONTENT_TYPES) response = [wsgi.GZipMiddleware(200)] return wsgi.WsgiHandler(middleware=[wsgi.wait_for_body_middleware, middleware], response_middleware=response)
def add_observer( self, callable_, entity_type=None, action=None, entity_id=None, predicate=None): """Register an "on-model-change" callback Once the model is connected, ``callable_`` will be called each time the model changes. ``callable_`` should be Awaitable and accept the following positional arguments: delta - An instance of :class:`juju.delta.EntityDelta` containing the raw delta data recv'd from the Juju websocket. old_obj - If the delta modifies an existing object in the model, old_obj will be a copy of that object, as it was before the delta was applied. Will be None if the delta creates a new entity in the model. new_obj - A copy of the new or updated object, after the delta is applied. Will be None if the delta removes an entity from the model. model - The :class:`Model` itself. Events for which ``callable_`` is called can be specified by passing entity_type, action, and/or entitiy_id filter criteria, e.g.:: add_observer( myfunc, entity_type='application', action='add', entity_id='ubuntu') For more complex filtering conditions, pass a predicate function. It will be called with a delta as its only argument. If the predicate function returns True, the ``callable_`` will be called. """ observer = _Observer( callable_, entity_type, action, entity_id, predicate) self._observers[observer] = callable_
Register an "on-model-change" callback Once the model is connected, ``callable_`` will be called each time the model changes. ``callable_`` should be Awaitable and accept the following positional arguments: delta - An instance of :class:`juju.delta.EntityDelta` containing the raw delta data recv'd from the Juju websocket. old_obj - If the delta modifies an existing object in the model, old_obj will be a copy of that object, as it was before the delta was applied. Will be None if the delta creates a new entity in the model. new_obj - A copy of the new or updated object, after the delta is applied. Will be None if the delta removes an entity from the model. model - The :class:`Model` itself. Events for which ``callable_`` is called can be specified by passing entity_type, action, and/or entitiy_id filter criteria, e.g.:: add_observer( myfunc, entity_type='application', action='add', entity_id='ubuntu') For more complex filtering conditions, pass a predicate function. It will be called with a delta as its only argument. If the predicate function returns True, the ``callable_`` will be called.
Below is the the instruction that describes the task: ### Input: Register an "on-model-change" callback Once the model is connected, ``callable_`` will be called each time the model changes. ``callable_`` should be Awaitable and accept the following positional arguments: delta - An instance of :class:`juju.delta.EntityDelta` containing the raw delta data recv'd from the Juju websocket. old_obj - If the delta modifies an existing object in the model, old_obj will be a copy of that object, as it was before the delta was applied. Will be None if the delta creates a new entity in the model. new_obj - A copy of the new or updated object, after the delta is applied. Will be None if the delta removes an entity from the model. model - The :class:`Model` itself. Events for which ``callable_`` is called can be specified by passing entity_type, action, and/or entitiy_id filter criteria, e.g.:: add_observer( myfunc, entity_type='application', action='add', entity_id='ubuntu') For more complex filtering conditions, pass a predicate function. It will be called with a delta as its only argument. If the predicate function returns True, the ``callable_`` will be called. ### Response: def add_observer( self, callable_, entity_type=None, action=None, entity_id=None, predicate=None): """Register an "on-model-change" callback Once the model is connected, ``callable_`` will be called each time the model changes. ``callable_`` should be Awaitable and accept the following positional arguments: delta - An instance of :class:`juju.delta.EntityDelta` containing the raw delta data recv'd from the Juju websocket. old_obj - If the delta modifies an existing object in the model, old_obj will be a copy of that object, as it was before the delta was applied. Will be None if the delta creates a new entity in the model. new_obj - A copy of the new or updated object, after the delta is applied. Will be None if the delta removes an entity from the model. model - The :class:`Model` itself. Events for which ``callable_`` is called can be specified by passing entity_type, action, and/or entitiy_id filter criteria, e.g.:: add_observer( myfunc, entity_type='application', action='add', entity_id='ubuntu') For more complex filtering conditions, pass a predicate function. It will be called with a delta as its only argument. If the predicate function returns True, the ``callable_`` will be called. """ observer = _Observer( callable_, entity_type, action, entity_id, predicate) self._observers[observer] = callable_
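An illustrative registration building on the docstring's own example. It assumes a connected Model instance named model and that the new application object exposes a name attribute; the callback signature follows the four positional arguments described above:

async def on_app_added(delta, old_obj, new_obj, model):
    print("application added:", new_obj.name)

model.add_observer(on_app_added, entity_type="application", action="add")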
def extend(self, items): """ Adds @items to the end of the list -> #int length of list after operation """ if items: if self.serialized: items = list(map(self._dumps, items)) self._client.rpush(self.key_prefix, *items)
Adds @items to the end of the list -> #int length of list after operation
Below is the the instruction that describes the task: ### Input: Adds @items to the end of the list -> #int length of list after operation ### Response: def extend(self, items): """ Adds @items to the end of the list -> #int length of list after operation """ if items: if self.serialized: items = list(map(self._dumps, items)) self._client.rpush(self.key_prefix, *items)
def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, uncertainty_method='BAR',maximum_iterations=500, relative_tolerance=1.0e-12, verbose=False, method='false-position', iterated_solution=True): """Compute free energy difference using the Bennett acceptance ratio (BAR) method. Parameters ---------- w_F : np.ndarray w_F[t] is the forward work value from snapshot t. t = 0...(T_F-1) Length T_F is deduced from vector. w_R : np.ndarray w_R[t] is the reverse work value from snapshot t. t = 0...(T_R-1) Length T_R is deduced from vector. DeltaF : float, optional, default=0.0 DeltaF can be set to initialize the free energy difference with a guess compute_uncertainty : bool, optional, default=True if False, only the free energy is returned uncertainty_method: string, optional, default=BAR There are two possible uncertainty estimates for BAR. One agrees with MBAR for two states exactly; The other only agrees with MBAR in the limit of good overlap. See below. maximum_iterations : int, optional, default=500 can be set to limit the maximum number of iterations performed relative_tolerance : float, optional, default=1E-11 can be set to determine the relative tolerance convergence criteria (defailt 1.0e-11) verbose : bool should be set to True if verbse debug output is desired (default False) method : str, optional, defualt='false-position' choice of method to solve BAR nonlinear equations, one of 'self-consistent-iteration' or 'false-position' (default: 'false-position') iterated_solution : bool, optional, default=True whether to fully solve the optimized BAR equation to consistency, or to stop after one step, to be equivalent to transition matrix sampling. Returns ------- result_vals : dictionary Possible keys in the result_vals dictionary 'Delta_f' : float Free energy difference 'dDelta_f': float Estimated standard deviation of free energy difference References ---------- [1] Shirts MR, Bair E, Hooker G, and Pande VS. Equilibrium free energies from nonequilibrium measurements using maximum-likelihood methods. PRL 91(14):140601, 2003. Notes ----- The false position method is used to solve the implicit equation. Examples -------- Compute free energy difference between two specified samples of work values. >>> from pymbar import testsystems >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0) >>> results = BAR(w_F, w_R) >>> print('Free energy difference is {:.3f} +- {:.3f} kT'.format(results['Delta_f'], results['dDelta_f'])) Free energy difference is 1.088 +- 0.050 kT Test completion of various other schemes. >>> results = BAR(w_F, w_R, method='self-consistent-iteration') >>> results = BAR(w_F, w_R, method='false-position') >>> results = BAR(w_F, w_R, method='bisection') """ result_vals = dict() # if computing nonoptimized, one step value, we set the max-iterations # to 1, and the method to 'self-consistent-iteration' if not iterated_solution: maximum_iterations = 1 method = 'self-consistent-iteration' DeltaF_initial = DeltaF if method == 'self-consistent-iteration': nfunc = 0 if method == 'bisection' or method == 'false-position': UpperB = EXP(w_F)['Delta_f'] LowerB = -EXP(w_R)['Delta_f'] FUpperB = BARzero(w_F, w_R, UpperB) FLowerB = BARzero(w_F, w_R, LowerB) nfunc = 2 if (np.isnan(FUpperB) or np.isnan(FLowerB)): # this data set is returning NAN -- will likely not work. Return 0, print a warning: # consider returning more information about failure print("Warning: BAR is likely to be inaccurate because of poor overlap. 
Improve the sampling, or decrease the spacing betweeen states. For now, guessing that the free energy difference is 0 with no uncertainty.") if compute_uncertainty: result_vals['Delta_f'] = 0.0 result_vals['dDelta_f'] = 0.0 return result_vals else: result_vals['Delta_f'] = 0.0 return result_vals while FUpperB * FLowerB > 0: # if they have the same sign, they do not bracket. Widen the bracket until they have opposite signs. # There may be a better way to do this, and the above bracket should rarely fail. if verbose: print('Initial brackets did not actually bracket, widening them') FAve = (UpperB + LowerB) / 2 UpperB = UpperB - max(abs(UpperB - FAve), 0.1) LowerB = LowerB + max(abs(LowerB - FAve), 0.1) FUpperB = BARzero(w_F, w_R, UpperB) FLowerB = BARzero(w_F, w_R, LowerB) nfunc += 2 # Iterate to convergence or until maximum number of iterations has been exceeded. for iteration in range(maximum_iterations): DeltaF_old = DeltaF if method == 'false-position': # Predict the new value if (LowerB == 0.0) and (UpperB == 0.0): DeltaF = 0.0 FNew = 0.0 else: DeltaF = UpperB - FUpperB * (UpperB - LowerB) / (FUpperB - FLowerB) FNew = BARzero(w_F, w_R, DeltaF) nfunc += 1 if FNew == 0: # Convergence is achieved. if verbose: print('Convergence achieved.') relative_change = 10 ** (-15) break if method == 'bisection': # Predict the new value DeltaF = (UpperB + LowerB) / 2 FNew = BARzero(w_F, w_R, DeltaF) nfunc += 1 if method == 'self-consistent-iteration': DeltaF = -BARzero(w_F, w_R, DeltaF) + DeltaF nfunc += 1 # Check for convergence. if (DeltaF == 0.0): # The free energy difference appears to be zero -- return. if verbose: print('The free energy difference appears to be zero.') break if iterated_solution: relative_change = abs((DeltaF - DeltaF_old) / DeltaF) if verbose: print("relative_change = {:12.3f}".format(relative_change)) if ((iteration > 0) and (relative_change < relative_tolerance)): # Convergence is achieved. if verbose: print("Convergence achieved.") break if method == 'false-position' or method == 'bisection': if FUpperB * FNew < 0: # these two now bracket the root LowerB = DeltaF FLowerB = FNew elif FLowerB * FNew <= 0: # these two now bracket the root UpperB = DeltaF FUpperB = FNew else: message = 'WARNING: Cannot determine bound on free energy' raise BoundsError(message) if verbose: print("iteration {:5d}: DeltaF = {:16.3f}".format(iteration, DeltaF)) # Report convergence, or warn user if not achieved. if iterated_solution: if iteration < maximum_iterations: if verbose: print('Converged to tolerance of {:e} in {:d} iterations ({:d} function evaluations)'.format(relative_change, iteration, nfunc)) else: message = 'WARNING: Did not converge to within specified tolerance. max_delta = {:f}, TOLERANCE = {:f}, MAX_ITS = %d'.format(relative_change, relative_tolerance, maximum_iterations) raise ConvergenceError(message) if compute_uncertainty: ''' Compute asymptotic variance estimate using Eq. 10a of Bennett, 1976 (except with n_1<f>_1^2 in the second denominator, it is an error in the original NOTE: The 'BAR' and 'MBAR' estimators do not agree for poor overlap. This is not because of numerical precision, but because they are fundamentally different estimators. For poor overlap, 'MBAR' diverges high, and 'BAR' diverges by being too low. In situations they are noticeably from each other, they are also pretty different from the true answer (obtained by calculating the standard deviation over lots of realizations). First, we examine the 'BAR' equation. 
Rederive from Bennett, substituting (8) into (7) (8) -> W = [q0/n0 exp(-U1) + q1/n1 exp(-U0)]^-1 <(W exp(-U1))^2 >_0 <(W exp(-U0))^2 >_1 (7) -> ----------------------- + ----------------------- - 1/n0 - 1/n1 n_0 [<(W exp(-U1)>_0]^2 n_1 [<(W exp(-U0)>_1]^2 Const cancels out of top and bottom. Wexp(-U0) = [q0/n0 exp(-(U1-U0)) + q1/n1]^-1 = n1/q1 [n1/n0 q0/q1 exp(-(U1-U0)) + 1]^-1 = n1/q1 [exp (M+(F1-F0)-(U1-U0)+1)^-1] = n1/q1 f(x) Wexp(-U1) = [q0/n0 + q1/n1 exp(-(U0-U1))]^-1 = n0/q0 [1 + n0/n1 q1/q0 exp(-(U0-U1))]^-1 = n0/q0 [1 + exp(-M+[F0-F1)-(U0-U1))]^-1 = n0/q0 f(-x) <(W exp(-U1))^2 >_0 <(W exp(-U0))^2 >_1 (7) -> ----------------------- + ----------------------- - 1/n0 - 1/n1 n_0 [<(W exp(-U1)>_0]^2 n_1 [<(W exp(-U0)>_1]^2 <[n0/q0 f(-x)]^2>_0 <[n1/q1 f(x)]^2>_1 ----------------------- + ------------------------ -1/n0 -1/n1 n_0 <n0/q0 f(-x)>_0^2 n_1 <n1/q1 f(x)>_1^2 1 <[f(-x)]^2>_0 1 <[f(x)]^2>_1 - [----------------------- - 1] + - [------------------------ - 1] n0 <f(-x)>_0^2 n1 n_1<f(x)>_1^2 where f = the fermi function, 1/(1+exp(-x)) This formula the 'BAR' equation works for works for free energies (F0-F1) that don't satisfy the BAR equation. The 'MBAR' equation, detailed below, only works for free energies that satisfy the equation. Now, let's look at the MBAR version of the uncertainty. This is written (from Shirts and Chodera, JPC, 129, 124105, Equation E9) as [ n0<f(x)f(-x)>_0 + n1<f(x)f(-x)_1 ]^-1 - n0^-1 - n1^-1 we note the f(-x) + f(x) = 1, and change this to: [ n0<(1-f(-x)f(-x)>_0 + n1<f(x)(1-f(x))_1 ]^-1 - n0^-1 - n1^-1 [ n0<f(-x)-f(-x)^2)>_0 + n1<f(x)-f(x)^2)_1 ]^-1 - n0^-1 - n1^-1 1 1 1 -------------------------------------------------------------------- - --- - --- n0 <f(-x)>_0 - n0 <[f(-x)]^2>_0 + n1 <f(x)>_1 + n1 <[f(x)]^2>_1 n0 n1 Removing the factor of - (T_F + T_R)/(T_F*T_R)) from both, we compare: <[f(-x)]^2>_0 <[f(x)]^2>_1 [------------------] + [---------------] n0 <f(-x)>_0^2 n1 <f(x)>_1^2 1 -------------------------------------------------------------------- n0 <f(-x)>_0 - n0 <[f(-x)]^2>_0 + n1 <f(x)>_1 + n1 <[f(x)]^2>_1 denote: <f(-x)>_0 = afF <f(-x)^2>_0 = afF2 <f(x)>_1 = afR <f(x)^2>_1 = afF2 Then we can look at both of these as: variance_BAR = (afF2/afF**2)/T_F + (afR2/afR**2)/T_R variance_MBAR = 1/(afF*T_F - afF2*T_F + afR*T_R - afR2*T_R) Rearranging: variance_BAR = (afF2/afF**2)/T_F + (afR2/afR**2)/T_R variance_MBAR = 1/(afF*T_F + afR*T_R - (afF2*T_F + afR2*T_R)) # check the steps below? Not quite sure. variance_BAR = (afF2/afF**2) + (afR2/afR**2) = (afF2 + afR2)/afR**2 variance_MBAR = 1/(afF + afR - (afF2 + afR2)) = 1/(2*afR-(afF2+afR2)) Definitely not the same. Now, the reason that they both work for high overlap is still not clear. We will determine the difference at some point. see https://github.com/choderalab/pymbar/issues/281 for more information. Now implement the two computations. ''' # Determine number of forward and reverse work values provided. T_F = float(w_F.size) # number of forward work values T_R = float(w_R.size) # number of reverse work values # Compute log ratio of forward and reverse counts. M = np.log(T_F / T_R) if iterated_solution: C = M - DeltaF else: C = M - DeltaF_initial # In theory, overflow handling should not be needed now, because we use numlogexp or a custom routine? 
# fF = 1 / (1 + np.exp(w_F + C)), but we need to handle overflows exp_arg_F = (w_F + C) max_arg_F = np.max(exp_arg_F) log_fF = - np.log(np.exp(-max_arg_F) + np.exp(exp_arg_F - max_arg_F)) afF = np.exp(logsumexp(log_fF)-max_arg_F)/T_F # fR = 1 / (1 + np.exp(w_R - C)), but we need to handle overflows exp_arg_R = (w_R - C) max_arg_R = np.max(exp_arg_R) log_fR = - np.log(np.exp(-max_arg_R) + np.exp(exp_arg_R - max_arg_R)) afR = np.exp(logsumexp(log_fR)-max_arg_R)/T_R afF2 = np.exp(logsumexp(2*log_fF)-2*max_arg_F)/T_F afR2 = np.exp(logsumexp(2*log_fR)-2*max_arg_R)/T_R nrat = (T_F + T_R)/(T_F * T_R) # same for both methods if uncertainty_method == 'BAR': variance = (afF2/afF**2)/T_F + (afR2/afR**2)/T_R - nrat dDeltaF = np.sqrt(variance) elif uncertainty_method == 'MBAR': # OR equivalently vartemp = ((afF - afF2)*T_F + (afR - afR2)*T_R) dDeltaF = np.sqrt(1.0/vartemp - nrat) else: message = 'ERROR: BAR uncertainty method {:s} is not defined'.format(uncertainty_method) raise ParameterError(message) if verbose: print("DeltaF = {:8.3f} +- {:8.3f}".format(DeltaF, dDeltaF)) result_vals['Delta_f'] = DeltaF result_vals['dDelta_f'] = dDeltaF else: if verbose: print("DeltaF = {:8.3f}".format(DeltaF)) result_vals['Delta_f'] = DeltaF return result_vals
Compute free energy difference using the Bennett acceptance ratio (BAR) method. Parameters ---------- w_F : np.ndarray w_F[t] is the forward work value from snapshot t. t = 0...(T_F-1) Length T_F is deduced from vector. w_R : np.ndarray w_R[t] is the reverse work value from snapshot t. t = 0...(T_R-1) Length T_R is deduced from vector. DeltaF : float, optional, default=0.0 DeltaF can be set to initialize the free energy difference with a guess compute_uncertainty : bool, optional, default=True if False, only the free energy is returned uncertainty_method: string, optional, default=BAR There are two possible uncertainty estimates for BAR. One agrees with MBAR for two states exactly; The other only agrees with MBAR in the limit of good overlap. See below. maximum_iterations : int, optional, default=500 can be set to limit the maximum number of iterations performed relative_tolerance : float, optional, default=1E-11 can be set to determine the relative tolerance convergence criteria (defailt 1.0e-11) verbose : bool should be set to True if verbse debug output is desired (default False) method : str, optional, defualt='false-position' choice of method to solve BAR nonlinear equations, one of 'self-consistent-iteration' or 'false-position' (default: 'false-position') iterated_solution : bool, optional, default=True whether to fully solve the optimized BAR equation to consistency, or to stop after one step, to be equivalent to transition matrix sampling. Returns ------- result_vals : dictionary Possible keys in the result_vals dictionary 'Delta_f' : float Free energy difference 'dDelta_f': float Estimated standard deviation of free energy difference References ---------- [1] Shirts MR, Bair E, Hooker G, and Pande VS. Equilibrium free energies from nonequilibrium measurements using maximum-likelihood methods. PRL 91(14):140601, 2003. Notes ----- The false position method is used to solve the implicit equation. Examples -------- Compute free energy difference between two specified samples of work values. >>> from pymbar import testsystems >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0) >>> results = BAR(w_F, w_R) >>> print('Free energy difference is {:.3f} +- {:.3f} kT'.format(results['Delta_f'], results['dDelta_f'])) Free energy difference is 1.088 +- 0.050 kT Test completion of various other schemes. >>> results = BAR(w_F, w_R, method='self-consistent-iteration') >>> results = BAR(w_F, w_R, method='false-position') >>> results = BAR(w_F, w_R, method='bisection')
Below is the the instruction that describes the task: ### Input: Compute free energy difference using the Bennett acceptance ratio (BAR) method. Parameters ---------- w_F : np.ndarray w_F[t] is the forward work value from snapshot t. t = 0...(T_F-1) Length T_F is deduced from vector. w_R : np.ndarray w_R[t] is the reverse work value from snapshot t. t = 0...(T_R-1) Length T_R is deduced from vector. DeltaF : float, optional, default=0.0 DeltaF can be set to initialize the free energy difference with a guess compute_uncertainty : bool, optional, default=True if False, only the free energy is returned uncertainty_method: string, optional, default=BAR There are two possible uncertainty estimates for BAR. One agrees with MBAR for two states exactly; The other only agrees with MBAR in the limit of good overlap. See below. maximum_iterations : int, optional, default=500 can be set to limit the maximum number of iterations performed relative_tolerance : float, optional, default=1E-11 can be set to determine the relative tolerance convergence criteria (defailt 1.0e-11) verbose : bool should be set to True if verbse debug output is desired (default False) method : str, optional, defualt='false-position' choice of method to solve BAR nonlinear equations, one of 'self-consistent-iteration' or 'false-position' (default: 'false-position') iterated_solution : bool, optional, default=True whether to fully solve the optimized BAR equation to consistency, or to stop after one step, to be equivalent to transition matrix sampling. Returns ------- result_vals : dictionary Possible keys in the result_vals dictionary 'Delta_f' : float Free energy difference 'dDelta_f': float Estimated standard deviation of free energy difference References ---------- [1] Shirts MR, Bair E, Hooker G, and Pande VS. Equilibrium free energies from nonequilibrium measurements using maximum-likelihood methods. PRL 91(14):140601, 2003. Notes ----- The false position method is used to solve the implicit equation. Examples -------- Compute free energy difference between two specified samples of work values. >>> from pymbar import testsystems >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0) >>> results = BAR(w_F, w_R) >>> print('Free energy difference is {:.3f} +- {:.3f} kT'.format(results['Delta_f'], results['dDelta_f'])) Free energy difference is 1.088 +- 0.050 kT Test completion of various other schemes. >>> results = BAR(w_F, w_R, method='self-consistent-iteration') >>> results = BAR(w_F, w_R, method='false-position') >>> results = BAR(w_F, w_R, method='bisection') ### Response: def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, uncertainty_method='BAR',maximum_iterations=500, relative_tolerance=1.0e-12, verbose=False, method='false-position', iterated_solution=True): """Compute free energy difference using the Bennett acceptance ratio (BAR) method. Parameters ---------- w_F : np.ndarray w_F[t] is the forward work value from snapshot t. t = 0...(T_F-1) Length T_F is deduced from vector. w_R : np.ndarray w_R[t] is the reverse work value from snapshot t. t = 0...(T_R-1) Length T_R is deduced from vector. DeltaF : float, optional, default=0.0 DeltaF can be set to initialize the free energy difference with a guess compute_uncertainty : bool, optional, default=True if False, only the free energy is returned uncertainty_method: string, optional, default=BAR There are two possible uncertainty estimates for BAR. 
One agrees with MBAR for two states exactly; The other only agrees with MBAR in the limit of good overlap. See below. maximum_iterations : int, optional, default=500 can be set to limit the maximum number of iterations performed relative_tolerance : float, optional, default=1E-11 can be set to determine the relative tolerance convergence criteria (defailt 1.0e-11) verbose : bool should be set to True if verbse debug output is desired (default False) method : str, optional, defualt='false-position' choice of method to solve BAR nonlinear equations, one of 'self-consistent-iteration' or 'false-position' (default: 'false-position') iterated_solution : bool, optional, default=True whether to fully solve the optimized BAR equation to consistency, or to stop after one step, to be equivalent to transition matrix sampling. Returns ------- result_vals : dictionary Possible keys in the result_vals dictionary 'Delta_f' : float Free energy difference 'dDelta_f': float Estimated standard deviation of free energy difference References ---------- [1] Shirts MR, Bair E, Hooker G, and Pande VS. Equilibrium free energies from nonequilibrium measurements using maximum-likelihood methods. PRL 91(14):140601, 2003. Notes ----- The false position method is used to solve the implicit equation. Examples -------- Compute free energy difference between two specified samples of work values. >>> from pymbar import testsystems >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0) >>> results = BAR(w_F, w_R) >>> print('Free energy difference is {:.3f} +- {:.3f} kT'.format(results['Delta_f'], results['dDelta_f'])) Free energy difference is 1.088 +- 0.050 kT Test completion of various other schemes. >>> results = BAR(w_F, w_R, method='self-consistent-iteration') >>> results = BAR(w_F, w_R, method='false-position') >>> results = BAR(w_F, w_R, method='bisection') """ result_vals = dict() # if computing nonoptimized, one step value, we set the max-iterations # to 1, and the method to 'self-consistent-iteration' if not iterated_solution: maximum_iterations = 1 method = 'self-consistent-iteration' DeltaF_initial = DeltaF if method == 'self-consistent-iteration': nfunc = 0 if method == 'bisection' or method == 'false-position': UpperB = EXP(w_F)['Delta_f'] LowerB = -EXP(w_R)['Delta_f'] FUpperB = BARzero(w_F, w_R, UpperB) FLowerB = BARzero(w_F, w_R, LowerB) nfunc = 2 if (np.isnan(FUpperB) or np.isnan(FLowerB)): # this data set is returning NAN -- will likely not work. Return 0, print a warning: # consider returning more information about failure print("Warning: BAR is likely to be inaccurate because of poor overlap. Improve the sampling, or decrease the spacing betweeen states. For now, guessing that the free energy difference is 0 with no uncertainty.") if compute_uncertainty: result_vals['Delta_f'] = 0.0 result_vals['dDelta_f'] = 0.0 return result_vals else: result_vals['Delta_f'] = 0.0 return result_vals while FUpperB * FLowerB > 0: # if they have the same sign, they do not bracket. Widen the bracket until they have opposite signs. # There may be a better way to do this, and the above bracket should rarely fail. if verbose: print('Initial brackets did not actually bracket, widening them') FAve = (UpperB + LowerB) / 2 UpperB = UpperB - max(abs(UpperB - FAve), 0.1) LowerB = LowerB + max(abs(LowerB - FAve), 0.1) FUpperB = BARzero(w_F, w_R, UpperB) FLowerB = BARzero(w_F, w_R, LowerB) nfunc += 2 # Iterate to convergence or until maximum number of iterations has been exceeded. 
for iteration in range(maximum_iterations): DeltaF_old = DeltaF if method == 'false-position': # Predict the new value if (LowerB == 0.0) and (UpperB == 0.0): DeltaF = 0.0 FNew = 0.0 else: DeltaF = UpperB - FUpperB * (UpperB - LowerB) / (FUpperB - FLowerB) FNew = BARzero(w_F, w_R, DeltaF) nfunc += 1 if FNew == 0: # Convergence is achieved. if verbose: print('Convergence achieved.') relative_change = 10 ** (-15) break if method == 'bisection': # Predict the new value DeltaF = (UpperB + LowerB) / 2 FNew = BARzero(w_F, w_R, DeltaF) nfunc += 1 if method == 'self-consistent-iteration': DeltaF = -BARzero(w_F, w_R, DeltaF) + DeltaF nfunc += 1 # Check for convergence. if (DeltaF == 0.0): # The free energy difference appears to be zero -- return. if verbose: print('The free energy difference appears to be zero.') break if iterated_solution: relative_change = abs((DeltaF - DeltaF_old) / DeltaF) if verbose: print("relative_change = {:12.3f}".format(relative_change)) if ((iteration > 0) and (relative_change < relative_tolerance)): # Convergence is achieved. if verbose: print("Convergence achieved.") break if method == 'false-position' or method == 'bisection': if FUpperB * FNew < 0: # these two now bracket the root LowerB = DeltaF FLowerB = FNew elif FLowerB * FNew <= 0: # these two now bracket the root UpperB = DeltaF FUpperB = FNew else: message = 'WARNING: Cannot determine bound on free energy' raise BoundsError(message) if verbose: print("iteration {:5d}: DeltaF = {:16.3f}".format(iteration, DeltaF)) # Report convergence, or warn user if not achieved. if iterated_solution: if iteration < maximum_iterations: if verbose: print('Converged to tolerance of {:e} in {:d} iterations ({:d} function evaluations)'.format(relative_change, iteration, nfunc)) else: message = 'WARNING: Did not converge to within specified tolerance. max_delta = {:f}, TOLERANCE = {:f}, MAX_ITS = %d'.format(relative_change, relative_tolerance, maximum_iterations) raise ConvergenceError(message) if compute_uncertainty: ''' Compute asymptotic variance estimate using Eq. 10a of Bennett, 1976 (except with n_1<f>_1^2 in the second denominator, it is an error in the original NOTE: The 'BAR' and 'MBAR' estimators do not agree for poor overlap. This is not because of numerical precision, but because they are fundamentally different estimators. For poor overlap, 'MBAR' diverges high, and 'BAR' diverges by being too low. In situations they are noticeably from each other, they are also pretty different from the true answer (obtained by calculating the standard deviation over lots of realizations). First, we examine the 'BAR' equation. Rederive from Bennett, substituting (8) into (7) (8) -> W = [q0/n0 exp(-U1) + q1/n1 exp(-U0)]^-1 <(W exp(-U1))^2 >_0 <(W exp(-U0))^2 >_1 (7) -> ----------------------- + ----------------------- - 1/n0 - 1/n1 n_0 [<(W exp(-U1)>_0]^2 n_1 [<(W exp(-U0)>_1]^2 Const cancels out of top and bottom. 
Wexp(-U0) = [q0/n0 exp(-(U1-U0)) + q1/n1]^-1 = n1/q1 [n1/n0 q0/q1 exp(-(U1-U0)) + 1]^-1 = n1/q1 [exp (M+(F1-F0)-(U1-U0)+1)^-1] = n1/q1 f(x) Wexp(-U1) = [q0/n0 + q1/n1 exp(-(U0-U1))]^-1 = n0/q0 [1 + n0/n1 q1/q0 exp(-(U0-U1))]^-1 = n0/q0 [1 + exp(-M+[F0-F1)-(U0-U1))]^-1 = n0/q0 f(-x) <(W exp(-U1))^2 >_0 <(W exp(-U0))^2 >_1 (7) -> ----------------------- + ----------------------- - 1/n0 - 1/n1 n_0 [<(W exp(-U1)>_0]^2 n_1 [<(W exp(-U0)>_1]^2 <[n0/q0 f(-x)]^2>_0 <[n1/q1 f(x)]^2>_1 ----------------------- + ------------------------ -1/n0 -1/n1 n_0 <n0/q0 f(-x)>_0^2 n_1 <n1/q1 f(x)>_1^2 1 <[f(-x)]^2>_0 1 <[f(x)]^2>_1 - [----------------------- - 1] + - [------------------------ - 1] n0 <f(-x)>_0^2 n1 n_1<f(x)>_1^2 where f = the fermi function, 1/(1+exp(-x)) This formula the 'BAR' equation works for works for free energies (F0-F1) that don't satisfy the BAR equation. The 'MBAR' equation, detailed below, only works for free energies that satisfy the equation. Now, let's look at the MBAR version of the uncertainty. This is written (from Shirts and Chodera, JPC, 129, 124105, Equation E9) as [ n0<f(x)f(-x)>_0 + n1<f(x)f(-x)_1 ]^-1 - n0^-1 - n1^-1 we note the f(-x) + f(x) = 1, and change this to: [ n0<(1-f(-x)f(-x)>_0 + n1<f(x)(1-f(x))_1 ]^-1 - n0^-1 - n1^-1 [ n0<f(-x)-f(-x)^2)>_0 + n1<f(x)-f(x)^2)_1 ]^-1 - n0^-1 - n1^-1 1 1 1 -------------------------------------------------------------------- - --- - --- n0 <f(-x)>_0 - n0 <[f(-x)]^2>_0 + n1 <f(x)>_1 + n1 <[f(x)]^2>_1 n0 n1 Removing the factor of - (T_F + T_R)/(T_F*T_R)) from both, we compare: <[f(-x)]^2>_0 <[f(x)]^2>_1 [------------------] + [---------------] n0 <f(-x)>_0^2 n1 <f(x)>_1^2 1 -------------------------------------------------------------------- n0 <f(-x)>_0 - n0 <[f(-x)]^2>_0 + n1 <f(x)>_1 + n1 <[f(x)]^2>_1 denote: <f(-x)>_0 = afF <f(-x)^2>_0 = afF2 <f(x)>_1 = afR <f(x)^2>_1 = afF2 Then we can look at both of these as: variance_BAR = (afF2/afF**2)/T_F + (afR2/afR**2)/T_R variance_MBAR = 1/(afF*T_F - afF2*T_F + afR*T_R - afR2*T_R) Rearranging: variance_BAR = (afF2/afF**2)/T_F + (afR2/afR**2)/T_R variance_MBAR = 1/(afF*T_F + afR*T_R - (afF2*T_F + afR2*T_R)) # check the steps below? Not quite sure. variance_BAR = (afF2/afF**2) + (afR2/afR**2) = (afF2 + afR2)/afR**2 variance_MBAR = 1/(afF + afR - (afF2 + afR2)) = 1/(2*afR-(afF2+afR2)) Definitely not the same. Now, the reason that they both work for high overlap is still not clear. We will determine the difference at some point. see https://github.com/choderalab/pymbar/issues/281 for more information. Now implement the two computations. ''' # Determine number of forward and reverse work values provided. T_F = float(w_F.size) # number of forward work values T_R = float(w_R.size) # number of reverse work values # Compute log ratio of forward and reverse counts. M = np.log(T_F / T_R) if iterated_solution: C = M - DeltaF else: C = M - DeltaF_initial # In theory, overflow handling should not be needed now, because we use numlogexp or a custom routine? 
# fF = 1 / (1 + np.exp(w_F + C)), but we need to handle overflows exp_arg_F = (w_F + C) max_arg_F = np.max(exp_arg_F) log_fF = - np.log(np.exp(-max_arg_F) + np.exp(exp_arg_F - max_arg_F)) afF = np.exp(logsumexp(log_fF)-max_arg_F)/T_F # fR = 1 / (1 + np.exp(w_R - C)), but we need to handle overflows exp_arg_R = (w_R - C) max_arg_R = np.max(exp_arg_R) log_fR = - np.log(np.exp(-max_arg_R) + np.exp(exp_arg_R - max_arg_R)) afR = np.exp(logsumexp(log_fR)-max_arg_R)/T_R afF2 = np.exp(logsumexp(2*log_fF)-2*max_arg_F)/T_F afR2 = np.exp(logsumexp(2*log_fR)-2*max_arg_R)/T_R nrat = (T_F + T_R)/(T_F * T_R) # same for both methods if uncertainty_method == 'BAR': variance = (afF2/afF**2)/T_F + (afR2/afR**2)/T_R - nrat dDeltaF = np.sqrt(variance) elif uncertainty_method == 'MBAR': # OR equivalently vartemp = ((afF - afF2)*T_F + (afR - afR2)*T_R) dDeltaF = np.sqrt(1.0/vartemp - nrat) else: message = 'ERROR: BAR uncertainty method {:s} is not defined'.format(uncertainty_method) raise ParameterError(message) if verbose: print("DeltaF = {:8.3f} +- {:8.3f}".format(DeltaF, dDeltaF)) result_vals['Delta_f'] = DeltaF result_vals['dDelta_f'] = dDeltaF else: if verbose: print("DeltaF = {:8.3f}".format(DeltaF)) result_vals['Delta_f'] = DeltaF return result_vals
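For reference, the variance comparison above can be reproduced in isolation. This is a minimal sketch, not the library's API: the work values are synthetic, DeltaF is a stand-in for the converged estimate produced by the iteration above, and logaddexp/logsumexp give the same overflow protection as the block above.

import numpy as np
from scipy.special import logsumexp

rng = np.random.default_rng(0)
w_F = rng.normal(1.0, 1.0, 500)   # hypothetical forward work values
w_R = rng.normal(1.0, 1.0, 500)   # hypothetical reverse work values
T_F, T_R = float(w_F.size), float(w_R.size)
DeltaF = 0.0                      # stand-in for the converged BAR estimate
C = np.log(T_F / T_R) - DeltaF

# Overflow-safe log of the Fermi function f(x) = 1 / (1 + exp(x))
log_fF = -np.logaddexp(0.0, w_F + C)   # log fF, evaluated on forward work
log_fR = -np.logaddexp(0.0, w_R - C)   # log fR, evaluated on reverse work

afF = np.exp(logsumexp(log_fF)) / T_F        # <f(-x)>_0
afR = np.exp(logsumexp(log_fR)) / T_R        # <f(x)>_1
afF2 = np.exp(logsumexp(2 * log_fF)) / T_F   # <f(-x)^2>_0
afR2 = np.exp(logsumexp(2 * log_fR)) / T_R   # <f(x)^2>_1

nrat = (T_F + T_R) / (T_F * T_R)
variance_BAR = (afF2 / afF ** 2) / T_F + (afR2 / afR ** 2) / T_R - nrat
variance_MBAR = 1.0 / ((afF - afF2) * T_F + (afR - afR2) * T_R) - nrat
print(np.sqrt(variance_BAR), np.sqrt(variance_MBAR))

Printing both makes the comparison between the two estimators concrete for any given pair of work distributions.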
def create_from_response_pdu(resp_pdu, req_pdu): """ Create instance from response PDU. Response PDU is required together with the number of registers read. :param resp_pdu: Byte array with request PDU. :param quantity: Number of coils read. :return: Instance of :class:`ReadCoils`. """ read_holding_registers = ReadHoldingRegisters() read_holding_registers.quantity = struct.unpack('>H', req_pdu[-2:])[0] read_holding_registers.byte_count = \ struct.unpack('>B', resp_pdu[1:2])[0] fmt = '>' + (conf.TYPE_CHAR * read_holding_registers.quantity) read_holding_registers.data = list(struct.unpack(fmt, resp_pdu[2:])) return read_holding_registers
Create instance from response PDU. Response PDU is required together with the number of registers read. :param resp_pdu: Byte array with request PDU. :param quantity: Number of coils read. :return: Instance of :class:`ReadCoils`.
Below is the the instruction that describes the task: ### Input: Create instance from response PDU. Response PDU is required together with the number of registers read. :param resp_pdu: Byte array with request PDU. :param quantity: Number of coils read. :return: Instance of :class:`ReadCoils`. ### Response: def create_from_response_pdu(resp_pdu, req_pdu): """ Create instance from response PDU. Response PDU is required together with the number of registers read. :param resp_pdu: Byte array with request PDU. :param quantity: Number of coils read. :return: Instance of :class:`ReadCoils`. """ read_holding_registers = ReadHoldingRegisters() read_holding_registers.quantity = struct.unpack('>H', req_pdu[-2:])[0] read_holding_registers.byte_count = \ struct.unpack('>B', resp_pdu[1:2])[0] fmt = '>' + (conf.TYPE_CHAR * read_holding_registers.quantity) read_holding_registers.data = list(struct.unpack(fmt, resp_pdu[2:])) return read_holding_registers
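To make the byte offsets concrete, here is a small sketch of the same unpacking against hand-built PDUs. The PDU contents are hypothetical, and conf.TYPE_CHAR is assumed to be 'H' (unsigned 16-bit registers), which is not visible in the snippet itself.

import struct

# Hypothetical request/response for "read 3 holding registers starting at address 0"
req_pdu = struct.pack('>BHH', 0x03, 0x0000, 3)        # function code, start address, quantity
resp_pdu = struct.pack('>BB3H', 0x03, 6, 10, 20, 30)  # function code, byte count, 3 register values

quantity = struct.unpack('>H', req_pdu[-2:])[0]                  # 3, taken from the request
byte_count = struct.unpack('>B', resp_pdu[1:2])[0]               # 6
data = list(struct.unpack('>' + 'H' * quantity, resp_pdu[2:]))   # [10, 20, 30]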
def gridLog(**kw): """Send GLRecord, Distributed Logging Utilities If the scheme is passed as a keyword parameter the value is expected to be a callable function that takes 2 parameters: url, outputStr GRIDLOG_ON -- turn grid logging on GRIDLOG_DEST -- provide URL destination """ import os if not bool( int(os.environ.get('GRIDLOG_ON', 0)) ): return url = os.environ.get('GRIDLOG_DEST') if url is None: return ## NOTE: urlparse problem w/customized schemes try: scheme = url[:url.find('://')] send = GLRegistry[scheme] send( url, str(GLRecord(**kw)), ) except Exception, ex: print >>sys.stderr, "*** gridLog failed -- %s" %(str(kw))
Send GLRecord, Distributed Logging Utilities If the scheme is passed as a keyword parameter the value is expected to be a callable function that takes 2 parameters: url, outputStr GRIDLOG_ON -- turn grid logging on GRIDLOG_DEST -- provide URL destination
Below is the the instruction that describes the task: ### Input: Send GLRecord, Distributed Logging Utilities If the scheme is passed as a keyword parameter the value is expected to be a callable function that takes 2 parameters: url, outputStr GRIDLOG_ON -- turn grid logging on GRIDLOG_DEST -- provide URL destination ### Response: def gridLog(**kw): """Send GLRecord, Distributed Logging Utilities If the scheme is passed as a keyword parameter the value is expected to be a callable function that takes 2 parameters: url, outputStr GRIDLOG_ON -- turn grid logging on GRIDLOG_DEST -- provide URL destination """ import os if not bool( int(os.environ.get('GRIDLOG_ON', 0)) ): return url = os.environ.get('GRIDLOG_DEST') if url is None: return ## NOTE: urlparse problem w/customized schemes try: scheme = url[:url.find('://')] send = GLRegistry[scheme] send( url, str(GLRecord(**kw)), ) except Exception, ex: print >>sys.stderr, "*** gridLog failed -- %s" %(str(kw))
def _get(self, components, picker, **params): """Generic get which handles call to api and setting of results Return: Results object""" url = '/'.join((self.base,) + components) headers = {"Authorization": "Token token=" + self._token} params['page'] = params.get('page') or self.page params['per_page'] = params.get('per_page') or self.per_page r = requests.get(".".join([url, self.format]), params=params, headers=headers) _next = self._nextify(components, picker, params) return Result(r, picker, _next)
Generic get which handles call to api and setting of results Return: Results object
Below is the the instruction that describes the task: ### Input: Generic get which handles call to api and setting of results Return: Results object ### Response: def _get(self, components, picker, **params): """Generic get which handles call to api and setting of results Return: Results object""" url = '/'.join((self.base,) + components) headers = {"Authorization": "Token token=" + self._token} params['page'] = params.get('page') or self.page params['per_page'] = params.get('per_page') or self.per_page r = requests.get(".".join([url, self.format]), params=params, headers=headers) _next = self._nextify(components, picker, params) return Result(r, picker, _next)
def get_instance(): """Return an instance of Client.""" global _instances user_agents = _config['user-agents'] user_agent = user_agents[ random.randint(0, len(user_agents) - 1) ] if len(user_agents) > 0 else DEFAULT_UA instance_key = user_agent try: instance = _instances[instance_key] except KeyError: instance = Client(user_agent, get_proxy) _instances[instance_key] = instance return instance
Return an instance of Client.
Below is the the instruction that describes the task: ### Input: Return an instance of Client. ### Response: def get_instance(): """Return an instance of Client.""" global _instances user_agents = _config['user-agents'] user_agent = user_agents[ random.randint(0, len(user_agents) - 1) ] if len(user_agents) > 0 else DEFAULT_UA instance_key = user_agent try: instance = _instances[instance_key] except KeyError: instance = Client(user_agent, get_proxy) _instances[instance_key] = instance return instance
def get_allowance(self, asset_name: str, from_address: str, to_address: str, is_full: bool = False) -> str:
        """
        This interface is used to get the allowance from transfer-from account to transfer-to account in current network.

        :param asset_name:
        :param from_address: a base58 encoded account address.
        :param to_address: a base58 encoded account address.
        :param is_full:
        :return: the information of allowance in dictionary form.
        """
        payload = self.generate_json_rpc_payload(RpcMethod.GET_ALLOWANCE, [asset_name, from_address, to_address])
        response = self.__post(self.__url, payload)
        if is_full:
            return response
        return response['result']
This interface is used to get the allowance from transfer-from account to transfer-to account in current network.

:param asset_name:
:param from_address: a base58 encoded account address.
:param to_address: a base58 encoded account address.
:param is_full:
:return: the information of allowance in dictionary form.
Below is the the instruction that describes the task:
### Input:
This interface is used to get the allowance from transfer-from account to transfer-to account in current network.

:param asset_name:
:param from_address: a base58 encoded account address.
:param to_address: a base58 encoded account address.
:param is_full:
:return: the information of allowance in dictionary form.
### Response:
def get_allowance(self, asset_name: str, from_address: str, to_address: str, is_full: bool = False) -> str:
        """
        This interface is used to get the allowance from transfer-from account to transfer-to account in current network.

        :param asset_name:
        :param from_address: a base58 encoded account address.
        :param to_address: a base58 encoded account address.
        :param is_full:
        :return: the information of allowance in dictionary form.
        """
        payload = self.generate_json_rpc_payload(RpcMethod.GET_ALLOWANCE, [asset_name, from_address, to_address])
        response = self.__post(self.__url, payload)
        if is_full:
            return response
        return response['result']
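The method itself only assembles a JSON-RPC payload and POSTs it. The sketch below shows the general shape of such a payload; the method name and parameter order are assumptions for illustration, not taken from generate_json_rpc_payload.

import json

payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "getallowance",   # assumed RPC method name
    "params": ["ong", "<from base58 address>", "<to base58 address>"],
}
body = json.dumps(payload)
# __post() sends the payload to self.__url; callers read response['result'] unless is_full=True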
def violin_or_box_plot(df, y, figformat, path, y_name, title=None, plot="violin", log=False, palette=None): """Create a violin or boxplot from the received DataFrame. The x-axis should be divided based on the 'dataset' column, the y-axis is specified in the arguments """ comp = Plot(path=path + "NanoComp_" + y.replace(' ', '_') + '.' + figformat, title="Comparing {}".format(y)) if y == "quals": comp.title = "Comparing base call quality scores" if plot == 'violin': logging.info("Nanoplotter: Creating violin plot for {}.".format(y)) process_violin_and_box(ax=sns.violinplot(x="dataset", y=y, data=df, inner=None, cut=0, palette=palette, linewidth=0), log=log, plot_obj=comp, title=title, y_name=y_name, figformat=figformat, ymax=np.amax(df[y])) elif plot == 'box': logging.info("Nanoplotter: Creating box plot for {}.".format(y)) process_violin_and_box(ax=sns.boxplot(x="dataset", y=y, data=df, palette=palette), log=log, plot_obj=comp, title=title, y_name=y_name, figformat=figformat, ymax=np.amax(df[y])) elif plot == 'ridge': logging.info("Nanoplotter: Creating ridges plot for {}.".format(y)) comp.fig, axes = joypy.joyplot(df, by="dataset", column=y, title=title or comp.title, x_range=[-0.05, np.amax(df[y])]) if log: xticks = [float(i.get_text()) for i in axes[-1].get_xticklabels()] axes[-1].set_xticklabels([10**i for i in xticks]) axes[-1].set_xticklabels(axes[-1].get_xticklabels(), rotation=30, ha='center') comp.save(format=figformat) else: logging.error("Unknown comp plot type {}".format(plot)) sys.exit("Unknown comp plot type {}".format(plot)) plt.close("all") return [comp]
Create a violin or boxplot from the received DataFrame. The x-axis should be divided based on the 'dataset' column, the y-axis is specified in the arguments
Below is the the instruction that describes the task: ### Input: Create a violin or boxplot from the received DataFrame. The x-axis should be divided based on the 'dataset' column, the y-axis is specified in the arguments ### Response: def violin_or_box_plot(df, y, figformat, path, y_name, title=None, plot="violin", log=False, palette=None): """Create a violin or boxplot from the received DataFrame. The x-axis should be divided based on the 'dataset' column, the y-axis is specified in the arguments """ comp = Plot(path=path + "NanoComp_" + y.replace(' ', '_') + '.' + figformat, title="Comparing {}".format(y)) if y == "quals": comp.title = "Comparing base call quality scores" if plot == 'violin': logging.info("Nanoplotter: Creating violin plot for {}.".format(y)) process_violin_and_box(ax=sns.violinplot(x="dataset", y=y, data=df, inner=None, cut=0, palette=palette, linewidth=0), log=log, plot_obj=comp, title=title, y_name=y_name, figformat=figformat, ymax=np.amax(df[y])) elif plot == 'box': logging.info("Nanoplotter: Creating box plot for {}.".format(y)) process_violin_and_box(ax=sns.boxplot(x="dataset", y=y, data=df, palette=palette), log=log, plot_obj=comp, title=title, y_name=y_name, figformat=figformat, ymax=np.amax(df[y])) elif plot == 'ridge': logging.info("Nanoplotter: Creating ridges plot for {}.".format(y)) comp.fig, axes = joypy.joyplot(df, by="dataset", column=y, title=title or comp.title, x_range=[-0.05, np.amax(df[y])]) if log: xticks = [float(i.get_text()) for i in axes[-1].get_xticklabels()] axes[-1].set_xticklabels([10**i for i in xticks]) axes[-1].set_xticklabels(axes[-1].get_xticklabels(), rotation=30, ha='center') comp.save(format=figformat) else: logging.error("Unknown comp plot type {}".format(plot)) sys.exit("Unknown comp plot type {}".format(plot)) plt.close("all") return [comp]
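A minimal usage sketch for the function above, using a synthetic per-read DataFrame. It assumes the module's plotting dependencies (seaborn, joypy, matplotlib) are importable and writes NanoComp_lengths.png into the current directory.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "dataset": ["run1"] * 200 + ["run2"] * 200,   # grouping used for the x-axis
    "lengths": np.concatenate([rng.exponential(8000, 200),
                               rng.exponential(12000, 200)]),
})
plots = violin_or_box_plot(df, y="lengths", figformat="png", path="./",
                           y_name="Read length", plot="violin")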
def get_raw(tree): """Get the exact words in lowercase in the tree object. Args: tree (Tree): Parsed tree structure Returns: Resulting string of tree ``(Ex: "The red car")`` """ if isinstance(tree, Tree): words = [] for child in tree: words.append(get_raw(child)) return ' '.join(words) else: return tree
Get the exact words in lowercase in the tree object. Args: tree (Tree): Parsed tree structure Returns: Resulting string of tree ``(Ex: "The red car")``
Below is the the instruction that describes the task: ### Input: Get the exact words in lowercase in the tree object. Args: tree (Tree): Parsed tree structure Returns: Resulting string of tree ``(Ex: "The red car")`` ### Response: def get_raw(tree): """Get the exact words in lowercase in the tree object. Args: tree (Tree): Parsed tree structure Returns: Resulting string of tree ``(Ex: "The red car")`` """ if isinstance(tree, Tree): words = [] for child in tree: words.append(get_raw(child)) return ' '.join(words) else: return tree
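A quick check of the recursion with an nltk parse tree, matching the docstring's own example (note that the function joins the leaves verbatim; it does not actually lowercase them):

from nltk import Tree

tree = Tree.fromstring("(NP (DT The) (JJ red) (NN car))")
print(get_raw(tree))   # -> "The red car"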
def position_rates(self): '''List of position rates for linear degrees of freedom.''' return [self.ode_obj.getPositionRate(i) for i in range(self.LDOF)]
List of position rates for linear degrees of freedom.
Below is the the instruction that describes the task: ### Input: List of position rates for linear degrees of freedom. ### Response: def position_rates(self): '''List of position rates for linear degrees of freedom.''' return [self.ode_obj.getPositionRate(i) for i in range(self.LDOF)]
def get_emitter(self, name: str) -> Callable[[Event], Event]: """Gets and emitter for a named event. Parameters ---------- name : The name of the event he requested emitter will emit. Users may provide their own named events by requesting an emitter with this function, but should do so with caution as it makes time much more difficult to think about. Returns ------- An emitter for the named event. The emitter should be called by the requesting component at the appropriate point in the simulation lifecycle. """ return self._event_manager.get_emitter(name)
Gets and emitter for a named event. Parameters ---------- name : The name of the event he requested emitter will emit. Users may provide their own named events by requesting an emitter with this function, but should do so with caution as it makes time much more difficult to think about. Returns ------- An emitter for the named event. The emitter should be called by the requesting component at the appropriate point in the simulation lifecycle.
Below is the the instruction that describes the task: ### Input: Gets and emitter for a named event. Parameters ---------- name : The name of the event he requested emitter will emit. Users may provide their own named events by requesting an emitter with this function, but should do so with caution as it makes time much more difficult to think about. Returns ------- An emitter for the named event. The emitter should be called by the requesting component at the appropriate point in the simulation lifecycle. ### Response: def get_emitter(self, name: str) -> Callable[[Event], Event]: """Gets and emitter for a named event. Parameters ---------- name : The name of the event he requested emitter will emit. Users may provide their own named events by requesting an emitter with this function, but should do so with caution as it makes time much more difficult to think about. Returns ------- An emitter for the named event. The emitter should be called by the requesting component at the appropriate point in the simulation lifecycle. """ return self._event_manager.get_emitter(name)
def find_gene_knockout_reactions(cobra_model, gene_list, compiled_gene_reaction_rules=None): """identify reactions which will be disabled when the genes are knocked out cobra_model: :class:`~cobra.core.Model.Model` gene_list: iterable of :class:`~cobra.core.Gene.Gene` compiled_gene_reaction_rules: dict of {reaction_id: compiled_string} If provided, this gives pre-compiled gene_reaction_rule strings. The compiled rule strings can be evaluated much faster. If a rule is not provided, the regular expression evaluation will be used. Because not all gene_reaction_rule strings can be evaluated, this dict must exclude any rules which can not be used with eval. """ potential_reactions = set() for gene in gene_list: if isinstance(gene, string_types): gene = cobra_model.genes.get_by_id(gene) potential_reactions.update(gene._reaction) gene_set = {str(i) for i in gene_list} if compiled_gene_reaction_rules is None: compiled_gene_reaction_rules = {r: parse_gpr(r.gene_reaction_rule)[0] for r in potential_reactions} return [r for r in potential_reactions if not eval_gpr(compiled_gene_reaction_rules[r], gene_set)]
identify reactions which will be disabled when the genes are knocked out cobra_model: :class:`~cobra.core.Model.Model` gene_list: iterable of :class:`~cobra.core.Gene.Gene` compiled_gene_reaction_rules: dict of {reaction_id: compiled_string} If provided, this gives pre-compiled gene_reaction_rule strings. The compiled rule strings can be evaluated much faster. If a rule is not provided, the regular expression evaluation will be used. Because not all gene_reaction_rule strings can be evaluated, this dict must exclude any rules which can not be used with eval.
Below is the the instruction that describes the task: ### Input: identify reactions which will be disabled when the genes are knocked out cobra_model: :class:`~cobra.core.Model.Model` gene_list: iterable of :class:`~cobra.core.Gene.Gene` compiled_gene_reaction_rules: dict of {reaction_id: compiled_string} If provided, this gives pre-compiled gene_reaction_rule strings. The compiled rule strings can be evaluated much faster. If a rule is not provided, the regular expression evaluation will be used. Because not all gene_reaction_rule strings can be evaluated, this dict must exclude any rules which can not be used with eval. ### Response: def find_gene_knockout_reactions(cobra_model, gene_list, compiled_gene_reaction_rules=None): """identify reactions which will be disabled when the genes are knocked out cobra_model: :class:`~cobra.core.Model.Model` gene_list: iterable of :class:`~cobra.core.Gene.Gene` compiled_gene_reaction_rules: dict of {reaction_id: compiled_string} If provided, this gives pre-compiled gene_reaction_rule strings. The compiled rule strings can be evaluated much faster. If a rule is not provided, the regular expression evaluation will be used. Because not all gene_reaction_rule strings can be evaluated, this dict must exclude any rules which can not be used with eval. """ potential_reactions = set() for gene in gene_list: if isinstance(gene, string_types): gene = cobra_model.genes.get_by_id(gene) potential_reactions.update(gene._reaction) gene_set = {str(i) for i in gene_list} if compiled_gene_reaction_rules is None: compiled_gene_reaction_rules = {r: parse_gpr(r.gene_reaction_rule)[0] for r in potential_reactions} return [r for r in potential_reactions if not eval_gpr(compiled_gene_reaction_rules[r], gene_set)]
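The heavy lifting is done by parse_gpr/eval_gpr. The sketch below shows how a single gene-reaction rule responds to knockouts; the import path is an assumption and has moved between cobra versions.

from cobra.core.gene import parse_gpr, eval_gpr   # assumed location of the helpers

rule, gene_names = parse_gpr("(geneA and geneB) or geneC")
print(eval_gpr(rule, {"geneA"}))            # True: geneC still carries the reaction
print(eval_gpr(rule, {"geneA", "geneC"}))   # False: the reaction would be knocked out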
def do_action(self, action, objects): """Performs the workflow transition passed in and returns the list of objects that have been successfully transitioned """ transitioned = [] ActionHandlerPool.get_instance().queue_pool() for obj in objects: obj = api.get_object(obj) success, message = do_action_for(obj, action) if success: transitioned.append(obj) ActionHandlerPool.get_instance().resume() return transitioned
Performs the workflow transition passed in and returns the list of objects that have been successfully transitioned
Below is the the instruction that describes the task: ### Input: Performs the workflow transition passed in and returns the list of objects that have been successfully transitioned ### Response: def do_action(self, action, objects): """Performs the workflow transition passed in and returns the list of objects that have been successfully transitioned """ transitioned = [] ActionHandlerPool.get_instance().queue_pool() for obj in objects: obj = api.get_object(obj) success, message = do_action_for(obj, action) if success: transitioned.append(obj) ActionHandlerPool.get_instance().resume() return transitioned
def main(): """CLI entrypoint for scaling policy creation""" logging.basicConfig(format=LOGGING_FORMAT) log = logging.getLogger(__name__) parser = argparse.ArgumentParser() add_debug(parser) add_app(parser) add_properties(parser) add_env(parser) add_region(parser) args = parser.parse_args() logging.getLogger(__package__.split('.')[0]).setLevel(args.debug) log.debug('Parsed arguments: %s', args) asgpolicy = AutoScalingPolicy(app=args.app, prop_path=args.properties, env=args.env, region=args.region) asgpolicy.create_policy()
CLI entrypoint for scaling policy creation
Below is the the instruction that describes the task: ### Input: CLI entrypoint for scaling policy creation ### Response: def main(): """CLI entrypoint for scaling policy creation""" logging.basicConfig(format=LOGGING_FORMAT) log = logging.getLogger(__name__) parser = argparse.ArgumentParser() add_debug(parser) add_app(parser) add_properties(parser) add_env(parser) add_region(parser) args = parser.parse_args() logging.getLogger(__package__.split('.')[0]).setLevel(args.debug) log.debug('Parsed arguments: %s', args) asgpolicy = AutoScalingPolicy(app=args.app, prop_path=args.properties, env=args.env, region=args.region) asgpolicy.create_policy()
def revert(self, request, queryset): """ Admin action to revert a configuration back to the selected value """ if queryset.count() != 1: self.message_user(request, _("Please select a single configuration to revert to.")) return target = queryset[0] target.id = None self.save_model(request, target, None, False) self.message_user(request, _("Reverted configuration.")) return HttpResponseRedirect( reverse( 'admin:{}_{}_change'.format( self.model._meta.app_label, self.model._meta.model_name, ), args=(target.id,), ) )
Admin action to revert a configuration back to the selected value
Below is the the instruction that describes the task: ### Input: Admin action to revert a configuration back to the selected value ### Response: def revert(self, request, queryset): """ Admin action to revert a configuration back to the selected value """ if queryset.count() != 1: self.message_user(request, _("Please select a single configuration to revert to.")) return target = queryset[0] target.id = None self.save_model(request, target, None, False) self.message_user(request, _("Reverted configuration.")) return HttpResponseRedirect( reverse( 'admin:{}_{}_change'.format( self.model._meta.app_label, self.model._meta.model_name, ), args=(target.id,), ) )
def dumps(module, cls=PVLEncoder, **kwargs): """Serialize ``module`` as a pvl module formated byte string. :param module: a ```PVLModule``` or ```dict``` like object to serialize :param cls: the encoder class used to serialize the pvl module. You may use the default ``PVLEncoder`` class or provided encoder formats such as the ```IsisCubeLabelEncoder``` and ```PDSLabelEncoder``` classes. You may also provided a custom sublcass of ```PVLEncoder``` :param **kwargs: the keyword arguments to pass to the encoder class. :returns: a byte string encoding of the pvl module """ stream = io.BytesIO() cls(**kwargs).encode(module, stream) return stream.getvalue()
Serialize ``module`` as a pvl module formated byte string. :param module: a ```PVLModule``` or ```dict``` like object to serialize :param cls: the encoder class used to serialize the pvl module. You may use the default ``PVLEncoder`` class or provided encoder formats such as the ```IsisCubeLabelEncoder``` and ```PDSLabelEncoder``` classes. You may also provided a custom sublcass of ```PVLEncoder``` :param **kwargs: the keyword arguments to pass to the encoder class. :returns: a byte string encoding of the pvl module
Below is the the instruction that describes the task: ### Input: Serialize ``module`` as a pvl module formated byte string. :param module: a ```PVLModule``` or ```dict``` like object to serialize :param cls: the encoder class used to serialize the pvl module. You may use the default ``PVLEncoder`` class or provided encoder formats such as the ```IsisCubeLabelEncoder``` and ```PDSLabelEncoder``` classes. You may also provided a custom sublcass of ```PVLEncoder``` :param **kwargs: the keyword arguments to pass to the encoder class. :returns: a byte string encoding of the pvl module ### Response: def dumps(module, cls=PVLEncoder, **kwargs): """Serialize ``module`` as a pvl module formated byte string. :param module: a ```PVLModule``` or ```dict``` like object to serialize :param cls: the encoder class used to serialize the pvl module. You may use the default ``PVLEncoder`` class or provided encoder formats such as the ```IsisCubeLabelEncoder``` and ```PDSLabelEncoder``` classes. You may also provided a custom sublcass of ```PVLEncoder``` :param **kwargs: the keyword arguments to pass to the encoder class. :returns: a byte string encoding of the pvl module """ stream = io.BytesIO() cls(**kwargs).encode(module, stream) return stream.getvalue()
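Since the docstring allows any dict-like module, a minimal call looks like the following; the keys are arbitrary placeholders.

module = {"INSTRUMENT": "CAMERA", "LINES": 1024}   # any dict-like object, per the docstring
encoded = dumps(module)                            # bytes containing the PVL text
print(encoded.decode())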
def workflow_start(obj, queue, keep_data, name, workflow_args): """ Send a workflow to the queue. \b NAME: The name of the workflow that should be started. WORKFLOW_ARGS: Workflow arguments in the form key1=value1 key2=value2. """ try: start_workflow(name=name, config=obj['config'], queue=queue, clear_data_store=not keep_data, store_args=dict([arg.split('=', maxsplit=1) for arg in workflow_args])) except (WorkflowArgumentError, WorkflowImportError) as e: click.echo(_style(obj['show_color'], 'An error occurred when trying to start the workflow', fg='red', bold=True)) click.echo('{}'.format(e)) except WorkflowDefinitionError as e: click.echo(_style(obj['show_color'], 'The graph {} in workflow {} is not a directed acyclic graph'. format(e.graph_name, e.workflow_name), fg='red', bold=True))
Send a workflow to the queue. \b NAME: The name of the workflow that should be started. WORKFLOW_ARGS: Workflow arguments in the form key1=value1 key2=value2.
Below is the the instruction that describes the task: ### Input: Send a workflow to the queue. \b NAME: The name of the workflow that should be started. WORKFLOW_ARGS: Workflow arguments in the form key1=value1 key2=value2. ### Response: def workflow_start(obj, queue, keep_data, name, workflow_args): """ Send a workflow to the queue. \b NAME: The name of the workflow that should be started. WORKFLOW_ARGS: Workflow arguments in the form key1=value1 key2=value2. """ try: start_workflow(name=name, config=obj['config'], queue=queue, clear_data_store=not keep_data, store_args=dict([arg.split('=', maxsplit=1) for arg in workflow_args])) except (WorkflowArgumentError, WorkflowImportError) as e: click.echo(_style(obj['show_color'], 'An error occurred when trying to start the workflow', fg='red', bold=True)) click.echo('{}'.format(e)) except WorkflowDefinitionError as e: click.echo(_style(obj['show_color'], 'The graph {} in workflow {} is not a directed acyclic graph'. format(e.graph_name, e.workflow_name), fg='red', bold=True))
def set_group_name(self, group, old_name, new_name): """ Group was renamed. """ lgroup = self._get_group(old_name) rename(lgroup, database=self._database, cn=new_name)
Group was renamed.
Below is the the instruction that describes the task: ### Input: Group was renamed. ### Response: def set_group_name(self, group, old_name, new_name): """ Group was renamed. """ lgroup = self._get_group(old_name) rename(lgroup, database=self._database, cn=new_name)
def fail(self, message, param=None, ctx=None): """Helper method to fail with an invalid value message.""" raise BadParameter(message, ctx=ctx, param=param)
Helper method to fail with an invalid value message.
Below is the the instruction that describes the task: ### Input: Helper method to fail with an invalid value message. ### Response: def fail(self, message, param=None, ctx=None): """Helper method to fail with an invalid value message.""" raise BadParameter(message, ctx=ctx, param=param)
def get_next(self): """Return next iteration time related to loop time""" return self.loop_time + (self.croniter.get_next(float) - self.time)
Return next iteration time related to loop time
Below is the the instruction that describes the task: ### Input: Return next iteration time related to loop time ### Response: def get_next(self): """Return next iteration time related to loop time""" return self.loop_time + (self.croniter.get_next(float) - self.time)
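The offset arithmetic in isolation, assuming self.time holds the wall-clock timestamp and self.loop_time the event-loop clock captured at the same instant (both names come from the snippet, but their origin is not shown):

import time
from croniter import croniter

wall_time = time.time()   # what self.time presumably holds
loop_time = 1000.0        # hypothetical loop.time() captured at the same moment

it = croniter('*/5 * * * *', wall_time)
next_loop_time = loop_time + (it.get_next(float) - wall_time)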
def load(cls, path, base=None): '''Either load a path and return a shovel object or return None''' obj = cls() obj.read(path, base) return obj
Either load a path and return a shovel object or return None
Below is the the instruction that describes the task: ### Input: Either load a path and return a shovel object or return None ### Response: def load(cls, path, base=None): '''Either load a path and return a shovel object or return None''' obj = cls() obj.read(path, base) return obj
def governor(self, dep_type, node): """ Registers a node as governing this node :param dep_type: The dependency type :type dep_type: str :param node: :return: self, provides fluent interface :rtype: corenlp_xml.dependencies.DependencyNode """ self._governors[dep_type] = self._governors.get(dep_type, []) + [node] return self
Registers a node as governing this node :param dep_type: The dependency type :type dep_type: str :param node: :return: self, provides fluent interface :rtype: corenlp_xml.dependencies.DependencyNode
Below is the the instruction that describes the task: ### Input: Registers a node as governing this node :param dep_type: The dependency type :type dep_type: str :param node: :return: self, provides fluent interface :rtype: corenlp_xml.dependencies.DependencyNode ### Response: def governor(self, dep_type, node): """ Registers a node as governing this node :param dep_type: The dependency type :type dep_type: str :param node: :return: self, provides fluent interface :rtype: corenlp_xml.dependencies.DependencyNode """ self._governors[dep_type] = self._governors.get(dep_type, []) + [node] return self
def check_json(code): """Yield errors.""" try: json.loads(code) except ValueError as exception: message = '{}'.format(exception) line_number = 0 found = re.search(r': line\s+([0-9]+)[^:]*$', message) if found: line_number = int(found.group(1)) yield (int(line_number), message)
Yield errors.
Below is the the instruction that describes the task: ### Input: Yield errors. ### Response: def check_json(code): """Yield errors.""" try: json.loads(code) except ValueError as exception: message = '{}'.format(exception) line_number = 0 found = re.search(r': line\s+([0-9]+)[^:]*$', message) if found: line_number = int(found.group(1)) yield (int(line_number), message)
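Fed malformed JSON, the generator yields (line_number, message) pairs; valid JSON yields nothing.

errors = list(check_json('{"a": 1,}'))
# -> [(1, 'Expecting property name enclosed in double quotes: line 1 column 9 (char 8)')]
#    (exact wording depends on the Python version)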
def fetch_url(src, dst): """ Fetch file from URL src and save it to dst. """ # we do not use the nicer sys.version_info.major # for compatibility with Python < 2.7 if sys.version_info[0] > 2: import urllib.request class URLopener(urllib.request.FancyURLopener): def http_error_default(self, url, fp, errcode, errmsg, headers): sys.stderr.write("ERROR: could not fetch {0}\n".format(url)) sys.exit(-1) else: import urllib class URLopener(urllib.FancyURLopener): def http_error_default(self, url, fp, errcode, errmsg, headers): sys.stderr.write("ERROR: could not fetch {0}\n".format(url)) sys.exit(-1) dirname = os.path.dirname(dst) if dirname != '': if not os.path.isdir(dirname): os.makedirs(dirname) opener = URLopener() opener.retrieve(src, dst)
Fetch file from URL src and save it to dst.
Below is the the instruction that describes the task: ### Input: Fetch file from URL src and save it to dst. ### Response: def fetch_url(src, dst): """ Fetch file from URL src and save it to dst. """ # we do not use the nicer sys.version_info.major # for compatibility with Python < 2.7 if sys.version_info[0] > 2: import urllib.request class URLopener(urllib.request.FancyURLopener): def http_error_default(self, url, fp, errcode, errmsg, headers): sys.stderr.write("ERROR: could not fetch {0}\n".format(url)) sys.exit(-1) else: import urllib class URLopener(urllib.FancyURLopener): def http_error_default(self, url, fp, errcode, errmsg, headers): sys.stderr.write("ERROR: could not fetch {0}\n".format(url)) sys.exit(-1) dirname = os.path.dirname(dst) if dirname != '': if not os.path.isdir(dirname): os.makedirs(dirname) opener = URLopener() opener.retrieve(src, dst)
def flatten(self): """Create a flattened version by putting output first and then states.""" ls = [self.output] ls.extend(self.state) return ls
Create a flattened version by putting output first and then states.
Below is the the instruction that describes the task: ### Input: Create a flattened version by putting output first and then states. ### Response: def flatten(self): """Create a flattened version by putting output first and then states.""" ls = [self.output] ls.extend(self.state) return ls
def _request(self, proxy, timeout): """ Returns WPToolsRequest object """ return request.WPToolsRequest(self.flags['silent'], self.flags['verbose'], proxy, timeout)
Returns WPToolsRequest object
Below is the the instruction that describes the task: ### Input: Returns WPToolsRequest object ### Response: def _request(self, proxy, timeout): """ Returns WPToolsRequest object """ return request.WPToolsRequest(self.flags['silent'], self.flags['verbose'], proxy, timeout)
def new_build(py_ver: PyVer): """Job for building/caching different docker images""" cache_file = f'app_{py_ver.name}.tar' cache_path = f'{cache_dir}/{cache_file}' cache_key = f'v3-{py_ver.name}-{{{{ .Branch }}}}' template = yaml.safe_load(f""" machine: image: 'circleci/classic:201710-02' docker_layer_caching: true steps: - checkout - restore_cache: keys: ['{cache_key}'] paths: ['{cache_path}'] - attach_workspace: at: {cache_dir} - run: name: Load docker image for TestRunner command: docker load -i {cache_dir}/{testrunner_cache} - run: name: Load docker image layer cache command: docker load -i {cache_path} || true # silent failure if missing cache - run: name: Build application docker image command: >- docker build --cache-from={py_ver.tag} -t {py_ver.tag} --build-arg TESTRUNNER_VERSION=$(cat {cache_dir}/testrunner_version.txt) -f container/{py_ver.docker_file} . - run: name: Make cache directory command: mkdir -p {cache_dir} - run: name: Export docker image layer cache command: docker save -o {cache_path} {py_ver.tag} - save_cache: # cache is used between builds key: '{cache_key}' paths: ['{cache_path}'] - persist_to_workspace: # workspace is used later in this same build root: {cache_dir} paths: '{cache_file}' # extracting documentation for review: - run: name: Start a named container command: docker run --name=SDK {py_ver.tag} - run: name: Extract the documentation command: 'docker cp SDK:/build/built_docs ./built_docs' - store_artifacts: path: built_docs """) return build_name(py_ver), template
Job for building/caching different docker images
Below is the the instruction that describes the task: ### Input: Job for building/caching different docker images ### Response: def new_build(py_ver: PyVer): """Job for building/caching different docker images""" cache_file = f'app_{py_ver.name}.tar' cache_path = f'{cache_dir}/{cache_file}' cache_key = f'v3-{py_ver.name}-{{{{ .Branch }}}}' template = yaml.safe_load(f""" machine: image: 'circleci/classic:201710-02' docker_layer_caching: true steps: - checkout - restore_cache: keys: ['{cache_key}'] paths: ['{cache_path}'] - attach_workspace: at: {cache_dir} - run: name: Load docker image for TestRunner command: docker load -i {cache_dir}/{testrunner_cache} - run: name: Load docker image layer cache command: docker load -i {cache_path} || true # silent failure if missing cache - run: name: Build application docker image command: >- docker build --cache-from={py_ver.tag} -t {py_ver.tag} --build-arg TESTRUNNER_VERSION=$(cat {cache_dir}/testrunner_version.txt) -f container/{py_ver.docker_file} . - run: name: Make cache directory command: mkdir -p {cache_dir} - run: name: Export docker image layer cache command: docker save -o {cache_path} {py_ver.tag} - save_cache: # cache is used between builds key: '{cache_key}' paths: ['{cache_path}'] - persist_to_workspace: # workspace is used later in this same build root: {cache_dir} paths: '{cache_file}' # extracting documentation for review: - run: name: Start a named container command: docker run --name=SDK {py_ver.tag} - run: name: Extract the documentation command: 'docker cp SDK:/build/built_docs ./built_docs' - store_artifacts: path: built_docs """) return build_name(py_ver), template
def launch_modules_with_names(modules_with_names, module_args={}, kill_before_launch=True): '''launch module.main functions in another process''' processes = [] if kill_before_launch: for module_name, name in modules_with_names: kill_module(name) for module_name, name in modules_with_names: m = importlib.import_module(module_name) args = {} if module_name in module_args: args = module_args[module_name] p1 = Process(target=m.main, args=args) p1.daemon = True p1.start() processes.append(p1) with open(get_launched_module_pid_file(name), 'w') as f: f.write('{}'.format(p1.pid)) return processes
launch module.main functions in another process
Below is the the instruction that describes the task: ### Input: launch module.main functions in another process ### Response: def launch_modules_with_names(modules_with_names, module_args={}, kill_before_launch=True): '''launch module.main functions in another process''' processes = [] if kill_before_launch: for module_name, name in modules_with_names: kill_module(name) for module_name, name in modules_with_names: m = importlib.import_module(module_name) args = {} if module_name in module_args: args = module_args[module_name] p1 = Process(target=m.main, args=args) p1.daemon = True p1.start() processes.append(p1) with open(get_launched_module_pid_file(name), 'w') as f: f.write('{}'.format(p1.pid)) return processes
def _ignore_class_scope(self, node): """ Return True if the node is in a local class scope, as an assignment. :param node: Node considered :type node: astroid.Node :return: True if the node is in a local class scope, as an assignment. False otherwise. :rtype: bool """ # Detect if we are in a local class scope, as an assignment. # For example, the following is fair game. # # class A: # b = 1 # c = lambda b=b: b * b # # class B: # tp = 1 # def func(self, arg: tp): # ... # class C: # tp = 2 # def func(self, arg=tp): # ... name = node.name frame = node.statement().scope() in_annotation_or_default = self._defined_in_function_definition(node, frame) if in_annotation_or_default: frame_locals = frame.parent.scope().locals else: frame_locals = frame.locals return not ( (isinstance(frame, astroid.ClassDef) or in_annotation_or_default) and name in frame_locals )
Return True if the node is in a local class scope, as an assignment. :param node: Node considered :type node: astroid.Node :return: True if the node is in a local class scope, as an assignment. False otherwise. :rtype: bool
Below is the the instruction that describes the task: ### Input: Return True if the node is in a local class scope, as an assignment. :param node: Node considered :type node: astroid.Node :return: True if the node is in a local class scope, as an assignment. False otherwise. :rtype: bool ### Response: def _ignore_class_scope(self, node): """ Return True if the node is in a local class scope, as an assignment. :param node: Node considered :type node: astroid.Node :return: True if the node is in a local class scope, as an assignment. False otherwise. :rtype: bool """ # Detect if we are in a local class scope, as an assignment. # For example, the following is fair game. # # class A: # b = 1 # c = lambda b=b: b * b # # class B: # tp = 1 # def func(self, arg: tp): # ... # class C: # tp = 2 # def func(self, arg=tp): # ... name = node.name frame = node.statement().scope() in_annotation_or_default = self._defined_in_function_definition(node, frame) if in_annotation_or_default: frame_locals = frame.parent.scope().locals else: frame_locals = frame.locals return not ( (isinstance(frame, astroid.ClassDef) or in_annotation_or_default) and name in frame_locals )
def export_ply(filename, cutout, level=0):
    """
    Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes).

    Arguments:
        filename (str): The filename to write out to
        cutout (numpy.ndarray): The dense annotation
        level (int): The level at which to run mcubes

    Returns:
        boolean success
    """
    if ".ply" not in filename:
        filename = filename + ".ply"

    vs, fs = mcubes.marching_cubes(cutout, level)

    with open(filename, 'w') as fh:
        lines = [
            "ply",
            "format ascii 1.0",
            "comment generated by ndio",
            "element vertex " + str(len(vs)),
            "property float32 x",
            "property float32 y",
            "property float32 z",
            "element face " + str(len(fs)),
            "property list uint8 int32 vertex_index",
            "end_header"
        ]
        # write() is used instead of writelines() so every header line,
        # vertex, and face ends with a newline (writelines adds none)
        fh.write("\n".join(lines) + "\n")

        for v in vs:
            fh.write("{} {} {}\n".format(v[0], v[1], v[2]))

        for f in fs:
            fh.write("3 {} {} {}\n".format(f[0], f[1], f[2]))
Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success
Below is the the instruction that describes the task:
### Input:
Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes).

Arguments:
    filename (str): The filename to write out to
    cutout (numpy.ndarray): The dense annotation
    level (int): The level at which to run mcubes

Returns:
    boolean success
### Response:
def export_ply(filename, cutout, level=0):
    """
    Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes).

    Arguments:
        filename (str): The filename to write out to
        cutout (numpy.ndarray): The dense annotation
        level (int): The level at which to run mcubes

    Returns:
        boolean success
    """
    if ".ply" not in filename:
        filename = filename + ".ply"

    vs, fs = mcubes.marching_cubes(cutout, level)

    with open(filename, 'w') as fh:
        lines = [
            "ply",
            "format ascii 1.0",
            "comment generated by ndio",
            "element vertex " + str(len(vs)),
            "property float32 x",
            "property float32 y",
            "property float32 z",
            "element face " + str(len(fs)),
            "property list uint8 int32 vertex_index",
            "end_header"
        ]
        # write() is used instead of writelines() so every header line,
        # vertex, and face ends with a newline (writelines adds none)
        fh.write("\n".join(lines) + "\n")

        for v in vs:
            fh.write("{} {} {}\n".format(v[0], v[1], v[2]))

        for f in fs:
            fh.write("3 {} {} {}\n".format(f[0], f[1], f[2]))
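A usage sketch with a synthetic volume: mcubes extracts the 0.5-level surface of the filled cube and the mesh is written to cube.ply.

import numpy as np

vol = np.zeros((32, 32, 32), dtype=np.uint8)   # hypothetical dense annotation
vol[8:24, 8:24, 8:24] = 1                      # a filled cube gives the surface something to find
export_ply("cube.ply", vol, level=0.5)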
def readBatchTupleQuotes(self, symbols, start, end): ''' read batch quotes as tuple to save memory ''' if end is None: end=sys.maxint ret={} session=self.getReadSession()() try: symbolChunks=splitListEqually(symbols, 100) for chunk in symbolChunks: rows=session.query(Quote.symbol, Quote.time, Quote.close, Quote.volume, Quote.low, Quote.high).filter(and_(Quote.symbol.in_(chunk), Quote.time >= int(start), Quote.time < int(end))) for row in rows: if row.time not in ret: ret[row.time]={} ret[row.time][row.symbol]=self.__sqlToTupleQuote(row) finally: self.getReadSession().remove() return ret
read batch quotes as tuple to save memory
Below is the the instruction that describes the task: ### Input: read batch quotes as tuple to save memory ### Response: def readBatchTupleQuotes(self, symbols, start, end): ''' read batch quotes as tuple to save memory ''' if end is None: end=sys.maxint ret={} session=self.getReadSession()() try: symbolChunks=splitListEqually(symbols, 100) for chunk in symbolChunks: rows=session.query(Quote.symbol, Quote.time, Quote.close, Quote.volume, Quote.low, Quote.high).filter(and_(Quote.symbol.in_(chunk), Quote.time >= int(start), Quote.time < int(end))) for row in rows: if row.time not in ret: ret[row.time]={} ret[row.time][row.symbol]=self.__sqlToTupleQuote(row) finally: self.getReadSession().remove() return ret
def authorize(self, role): """Check permission""" resource, action = parse_request() roles = self.meta.get("$roles", {}) message = "%s can't access %s.%s" % (role, resource, action) try: if action not in roles[role][resource]: abort(403, "PermissionDeny", message) except KeyError: abort(403, "PermissionDeny", message)
Check permission
Below is the the instruction that describes the task: ### Input: Check permission ### Response: def authorize(self, role): """Check permission""" resource, action = parse_request() roles = self.meta.get("$roles", {}) message = "%s can't access %s.%s" % (role, resource, action) try: if action not in roles[role][resource]: abort(403, "PermissionDeny", message) except KeyError: abort(403, "PermissionDeny", message)
def check_number_available(self, id_environment, num_vlan, id_vlan): """Checking if environment has a number vlan available :param id_environment: Identifier of environment :param num_vlan: Vlan number :param id_vlan: Vlan indentifier (False if inserting a vlan) :return: True is has number available, False if hasn't :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise InvalidParameterError: Invalid ID for VLAN. :raise VlanNaoExisteError: VLAN not found. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ url = 'vlan/check_number_available/' + \ str(id_environment) + '/' + str(num_vlan) + '/' + str(id_vlan) code, xml = self.submit(None, 'GET', url) return self.response(code, xml)
Checking if environment has a number vlan available :param id_environment: Identifier of environment :param num_vlan: Vlan number :param id_vlan: Vlan indentifier (False if inserting a vlan) :return: True is has number available, False if hasn't :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise InvalidParameterError: Invalid ID for VLAN. :raise VlanNaoExisteError: VLAN not found. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
Below is the the instruction that describes the task: ### Input: Checking if environment has a number vlan available :param id_environment: Identifier of environment :param num_vlan: Vlan number :param id_vlan: Vlan indentifier (False if inserting a vlan) :return: True is has number available, False if hasn't :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise InvalidParameterError: Invalid ID for VLAN. :raise VlanNaoExisteError: VLAN not found. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. ### Response: def check_number_available(self, id_environment, num_vlan, id_vlan): """Checking if environment has a number vlan available :param id_environment: Identifier of environment :param num_vlan: Vlan number :param id_vlan: Vlan indentifier (False if inserting a vlan) :return: True is has number available, False if hasn't :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise InvalidParameterError: Invalid ID for VLAN. :raise VlanNaoExisteError: VLAN not found. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ url = 'vlan/check_number_available/' + \ str(id_environment) + '/' + str(num_vlan) + '/' + str(id_vlan) code, xml = self.submit(None, 'GET', url) return self.response(code, xml)
def read_zipfile(self, encoding='utf8'): """ READ FIRST FILE IN ZIP FILE :param encoding: :return: STRING """ from zipfile import ZipFile with ZipFile(self.abspath) as zipped: for num, zip_name in enumerate(zipped.namelist()): return zipped.open(zip_name).read().decode(encoding)
READ FIRST FILE IN ZIP FILE :param encoding: :return: STRING
Below is the the instruction that describes the task: ### Input: READ FIRST FILE IN ZIP FILE :param encoding: :return: STRING ### Response: def read_zipfile(self, encoding='utf8'): """ READ FIRST FILE IN ZIP FILE :param encoding: :return: STRING """ from zipfile import ZipFile with ZipFile(self.abspath) as zipped: for num, zip_name in enumerate(zipped.namelist()): return zipped.open(zip_name).read().decode(encoding)
def get_parents(docgraph, child_node, strict=True): """Return a list of parent nodes that dominate this child. In a 'syntax tree' a node never has more than one parent node dominating it. To enforce this, set strict=True. Parameters ---------- docgraph : DiscourseDocumentGraph a document graph strict : bool If True, raise a ValueError if a child node is dominated by more than one parent node. Returns ------- parents : list a list of (parent) node IDs. """ parents = [] for src, _, edge_attrs in docgraph.in_edges(child_node, data=True): if edge_attrs['edge_type'] == EdgeTypes.dominance_relation: parents.append(src) if strict and len(parents) > 1: raise ValueError(("In a syntax tree, a node can't be " "dominated by more than one parent")) return parents
Return a list of parent nodes that dominate this child. In a 'syntax tree' a node never has more than one parent node dominating it. To enforce this, set strict=True. Parameters ---------- docgraph : DiscourseDocumentGraph a document graph strict : bool If True, raise a ValueError if a child node is dominated by more than one parent node. Returns ------- parents : list a list of (parent) node IDs.
Below is the the instruction that describes the task: ### Input: Return a list of parent nodes that dominate this child. In a 'syntax tree' a node never has more than one parent node dominating it. To enforce this, set strict=True. Parameters ---------- docgraph : DiscourseDocumentGraph a document graph strict : bool If True, raise a ValueError if a child node is dominated by more than one parent node. Returns ------- parents : list a list of (parent) node IDs. ### Response: def get_parents(docgraph, child_node, strict=True): """Return a list of parent nodes that dominate this child. In a 'syntax tree' a node never has more than one parent node dominating it. To enforce this, set strict=True. Parameters ---------- docgraph : DiscourseDocumentGraph a document graph strict : bool If True, raise a ValueError if a child node is dominated by more than one parent node. Returns ------- parents : list a list of (parent) node IDs. """ parents = [] for src, _, edge_attrs in docgraph.in_edges(child_node, data=True): if edge_attrs['edge_type'] == EdgeTypes.dominance_relation: parents.append(src) if strict and len(parents) > 1: raise ValueError(("In a syntax tree, a node can't be " "dominated by more than one parent")) return parents
def _K(self, R): """Return numpy array from K1 up to and including Kn. (eqn. 5)""" return self._ns * self._N / R / self._sin_alpha
Return numpy array from K1 up to and including Kn. (eqn. 5)
Below is the the instruction that describes the task: ### Input: Return numpy array from K1 up to and including Kn. (eqn. 5) ### Response: def _K(self, R): """Return numpy array from K1 up to and including Kn. (eqn. 5)""" return self._ns * self._N / R / self._sin_alpha
def new_keypair(key, value, ambig, unambig): """ Check new keypair against existing unambiguous dict :param key: of pair :param value: of pair :param ambig: set of keys with ambig decoding :param unambig: set of keys with unambig decoding :return: """ if key in ambig: return if key in unambig and value != unambig[key]: ambig.add(key) del unambig[key] return unambig[key] = value return
Check new keypair against existing unambiguous dict :param key: of pair :param value: of pair :param ambig: set of keys with ambig decoding :param unambig: set of keys with unambig decoding :return:
Below is the the instruction that describes the task: ### Input: Check new keypair against existing unambiguous dict :param key: of pair :param value: of pair :param ambig: set of keys with ambig decoding :param unambig: set of keys with unambig decoding :return: ### Response: def new_keypair(key, value, ambig, unambig): """ Check new keypair against existing unambiguous dict :param key: of pair :param value: of pair :param ambig: set of keys with ambig decoding :param unambig: set of keys with unambig decoding :return: """ if key in ambig: return if key in unambig and value != unambig[key]: ambig.add(key) del unambig[key] return unambig[key] = value return
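A short trace of how the two containers evolve:

ambig, unambig = set(), {}
new_keypair('A', 1, ambig, unambig)   # first sighting: unambig == {'A': 1}
new_keypair('A', 1, ambig, unambig)   # same value again: nothing changes
new_keypair('A', 2, ambig, unambig)   # conflict: 'A' moves to ambig and leaves unambig
new_keypair('A', 3, ambig, unambig)   # already ambiguous: ignored
assert ambig == {'A'} and unambig == {}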
def _merge_hash(option, opt_str, value, parser): # type: (Option, str, str, OptionParser) -> None """Given a value spelled "algo:digest", append the digest to a list pointed to in a dict by the algo name.""" if not parser.values.hashes: parser.values.hashes = {} # type: ignore try: algo, digest = value.split(':', 1) except ValueError: parser.error('Arguments to %s must be a hash name ' 'followed by a value, like --hash=sha256:abcde...' % opt_str) if algo not in STRONG_HASHES: parser.error('Allowed hash algorithms for %s are %s.' % (opt_str, ', '.join(STRONG_HASHES))) parser.values.hashes.setdefault(algo, []).append(digest)
Given a value spelled "algo:digest", append the digest to a list pointed to in a dict by the algo name.
Below is the the instruction that describes the task: ### Input: Given a value spelled "algo:digest", append the digest to a list pointed to in a dict by the algo name. ### Response: def _merge_hash(option, opt_str, value, parser): # type: (Option, str, str, OptionParser) -> None """Given a value spelled "algo:digest", append the digest to a list pointed to in a dict by the algo name.""" if not parser.values.hashes: parser.values.hashes = {} # type: ignore try: algo, digest = value.split(':', 1) except ValueError: parser.error('Arguments to %s must be a hash name ' 'followed by a value, like --hash=sha256:abcde...' % opt_str) if algo not in STRONG_HASHES: parser.error('Allowed hash algorithms for %s are %s.' % (opt_str, ', '.join(STRONG_HASHES))) parser.values.hashes.setdefault(algo, []).append(digest)
def first(iterable = None, *, name = None, metric = call_default): """Measure time elapsed to produce first item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric """ if iterable is None: return _first_decorator(name, metric) else: return _do_first(iterable, name, metric)
Measure time elapsed to produce first item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric
Below is the the instruction that describes the task: ### Input: Measure time elapsed to produce first item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric ### Response: def first(iterable = None, *, name = None, metric = call_default): """Measure time elapsed to produce first item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric """ if iterable is None: return _first_decorator(name, metric) else: return _do_first(iterable, name, metric)
def hurst_compare_nvals(data, nvals=None): """ Creates a plot that compares the results of different choices for nvals for the function hurst_rs. Args: data (array-like of float): the input data from which the hurst exponent should be estimated Kwargs: nvals (array of int): a manually selected value for the nvals parameter that should be plotted in comparison to the default choices """ import matplotlib.pyplot as plt data = np.asarray(data) n_all = np.arange(2,len(data)+1) dd_all = nolds.hurst_rs(data, nvals=n_all, debug_data=True, fit="poly") dd_def = nolds.hurst_rs(data, debug_data=True, fit="poly") n_def = np.round(np.exp(dd_def[1][0])).astype("int32") n_div = n_all[np.where(len(data) % n_all[:-1] == 0)] dd_div = nolds.hurst_rs(data, nvals=n_div, debug_data=True, fit="poly") def corr(nvals): return [np.log(nolds.expected_rs(n)) for n in nvals] l_all = plt.plot(dd_all[1][0], dd_all[1][1] - corr(n_all), "o") l_def = plt.plot(dd_def[1][0], dd_def[1][1] - corr(n_def), "o") l_div = plt.plot(dd_div[1][0], dd_div[1][1] - corr(n_div), "o") l_cst = [] t_cst = [] if nvals is not None: dd_cst = nolds.hurst_rs(data, nvals=nvals, debug_data=True, fit="poly") l_cst = plt.plot(dd_cst[1][0], dd_cst[1][1] - corr(nvals), "o") l_cst = l_cst t_cst = ["custom"] plt.xlabel("log(n)") plt.ylabel("log((R/S)_n - E[(R/S)_n])") plt.legend(l_all + l_def + l_div + l_cst, ["all", "default", "divisors"] + t_cst) labeled_data = zip([dd_all[0], dd_def[0], dd_div[0]], ["all", "def", "div"]) for data, label in labeled_data: print("%s: %.3f" % (label, data)) if nvals is not None: print("custom: %.3f" % dd_cst[0]) plt.show()
Creates a plot that compares the results of different choices for nvals for the function hurst_rs. Args: data (array-like of float): the input data from which the hurst exponent should be estimated Kwargs: nvals (array of int): a manually selected value for the nvals parameter that should be plotted in comparison to the default choices
Below is the the instruction that describes the task: ### Input: Creates a plot that compares the results of different choices for nvals for the function hurst_rs. Args: data (array-like of float): the input data from which the hurst exponent should be estimated Kwargs: nvals (array of int): a manually selected value for the nvals parameter that should be plotted in comparison to the default choices ### Response: def hurst_compare_nvals(data, nvals=None): """ Creates a plot that compares the results of different choices for nvals for the function hurst_rs. Args: data (array-like of float): the input data from which the hurst exponent should be estimated Kwargs: nvals (array of int): a manually selected value for the nvals parameter that should be plotted in comparison to the default choices """ import matplotlib.pyplot as plt data = np.asarray(data) n_all = np.arange(2,len(data)+1) dd_all = nolds.hurst_rs(data, nvals=n_all, debug_data=True, fit="poly") dd_def = nolds.hurst_rs(data, debug_data=True, fit="poly") n_def = np.round(np.exp(dd_def[1][0])).astype("int32") n_div = n_all[np.where(len(data) % n_all[:-1] == 0)] dd_div = nolds.hurst_rs(data, nvals=n_div, debug_data=True, fit="poly") def corr(nvals): return [np.log(nolds.expected_rs(n)) for n in nvals] l_all = plt.plot(dd_all[1][0], dd_all[1][1] - corr(n_all), "o") l_def = plt.plot(dd_def[1][0], dd_def[1][1] - corr(n_def), "o") l_div = plt.plot(dd_div[1][0], dd_div[1][1] - corr(n_div), "o") l_cst = [] t_cst = [] if nvals is not None: dd_cst = nolds.hurst_rs(data, nvals=nvals, debug_data=True, fit="poly") l_cst = plt.plot(dd_cst[1][0], dd_cst[1][1] - corr(nvals), "o") l_cst = l_cst t_cst = ["custom"] plt.xlabel("log(n)") plt.ylabel("log((R/S)_n - E[(R/S)_n])") plt.legend(l_all + l_def + l_div + l_cst, ["all", "default", "divisors"] + t_cst) labeled_data = zip([dd_all[0], dd_def[0], dd_div[0]], ["all", "def", "div"]) for data, label in labeled_data: print("%s: %.3f" % (label, data)) if nvals is not None: print("custom: %.3f" % dd_cst[0]) plt.show()
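The "divisors" choice of nvals can be exercised on its own with numpy, without nolds or matplotlib; this sketch keeps every n that divides the series length evenly (the original additionally excludes n equal to the full length).

import numpy as np

data_len = 120
n_all = np.arange(2, data_len + 1)
# keep only the n that divide the series length evenly (the "divisors" choice above)
n_div = n_all[data_len % n_all == 0]
print(n_div)  # [  2   3   4   5   6   8  10  12  15  20  24  30  40  60 120]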
def _publish_daemon(self, log_queue=None): ''' Bind to the interface specified in the configuration file ''' salt.utils.process.appendproctitle(self.__class__.__name__) if log_queue: salt.log.setup.set_multiprocessing_logging_queue(log_queue) salt.log.setup.setup_multiprocessing_logging(log_queue) # Set up the context context = zmq.Context(1) # Prepare minion publish socket pub_sock = context.socket(zmq.PUB) _set_tcp_keepalive(pub_sock, self.opts) # if 2.1 >= zmq < 3.0, we only have one HWM setting try: pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000)) # in zmq >= 3.0, there are separate send and receive HWM settings except AttributeError: # Set the High Water Marks. For more information on HWM, see: # http://api.zeromq.org/4-1:zmq-setsockopt pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000)) pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000)) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses pub_sock.setsockopt(zmq.IPV4ONLY, 0) pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000)) pub_sock.setsockopt(zmq.LINGER, -1) pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts) # Prepare minion pull socket pull_sock = context.socket(zmq.PULL) pull_sock.setsockopt(zmq.LINGER, -1) if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_publish_pull', 4514) ) else: pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) salt.utils.zeromq.check_ipc_path_max_len(pull_uri) # Start the minion command publisher log.info('Starting the Salt Publisher on %s', pub_uri) pub_sock.bind(pub_uri) # Securely create socket log.info('Starting the Salt Puller on %s', pull_uri) with salt.utils.files.set_umask(0o177): pull_sock.bind(pull_uri) try: while True: # Catch and handle EINTR from when this process is sent # SIGUSR1 gracefully so we don't choke and die horribly try: log.debug('Publish daemon getting data from puller %s', pull_uri) package = pull_sock.recv() log.debug('Publish daemon received payload. 
size=%d', len(package)) unpacked_package = salt.payload.unpackage(package) if six.PY3: unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package) payload = unpacked_package['payload'] log.trace('Accepted unpacked package from puller') if self.opts['zmq_filtering']: # if you have a specific topic list, use that if 'topic_lst' in unpacked_package: for topic in unpacked_package['topic_lst']: log.trace('Sending filtered data over publisher %s', pub_uri) # zmq filters are substring match, hash the topic # to avoid collisions htopic = salt.utils.stringutils.to_bytes(hashlib.sha1(topic).hexdigest()) pub_sock.send(htopic, flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Filtered data has been sent') # Syndic broadcast if self.opts.get('order_masters'): log.trace('Sending filtered data to syndic') pub_sock.send(b'syndic', flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Filtered data has been sent to syndic') # otherwise its a broadcast else: # TODO: constants file for "broadcast" log.trace('Sending broadcasted data over publisher %s', pub_uri) pub_sock.send(b'broadcast', flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Broadcasted data has been sent') else: log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri) pub_sock.send(payload) log.trace('Unfiltered data has been sent') except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc except KeyboardInterrupt: log.trace('Publish daemon caught Keyboard interupt, tearing down') # Cleanly close the sockets if we're shutting down if pub_sock.closed is False: pub_sock.close() if pull_sock.closed is False: pull_sock.close() if context.closed is False: context.term()
Bind to the interface specified in the configuration file
Below is the the instruction that describes the task: ### Input: Bind to the interface specified in the configuration file ### Response: def _publish_daemon(self, log_queue=None): ''' Bind to the interface specified in the configuration file ''' salt.utils.process.appendproctitle(self.__class__.__name__) if log_queue: salt.log.setup.set_multiprocessing_logging_queue(log_queue) salt.log.setup.setup_multiprocessing_logging(log_queue) # Set up the context context = zmq.Context(1) # Prepare minion publish socket pub_sock = context.socket(zmq.PUB) _set_tcp_keepalive(pub_sock, self.opts) # if 2.1 >= zmq < 3.0, we only have one HWM setting try: pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000)) # in zmq >= 3.0, there are separate send and receive HWM settings except AttributeError: # Set the High Water Marks. For more information on HWM, see: # http://api.zeromq.org/4-1:zmq-setsockopt pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000)) pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000)) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses pub_sock.setsockopt(zmq.IPV4ONLY, 0) pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000)) pub_sock.setsockopt(zmq.LINGER, -1) pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts) # Prepare minion pull socket pull_sock = context.socket(zmq.PULL) pull_sock.setsockopt(zmq.LINGER, -1) if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = 'tcp://127.0.0.1:{0}'.format( self.opts.get('tcp_master_publish_pull', 4514) ) else: pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) salt.utils.zeromq.check_ipc_path_max_len(pull_uri) # Start the minion command publisher log.info('Starting the Salt Publisher on %s', pub_uri) pub_sock.bind(pub_uri) # Securely create socket log.info('Starting the Salt Puller on %s', pull_uri) with salt.utils.files.set_umask(0o177): pull_sock.bind(pull_uri) try: while True: # Catch and handle EINTR from when this process is sent # SIGUSR1 gracefully so we don't choke and die horribly try: log.debug('Publish daemon getting data from puller %s', pull_uri) package = pull_sock.recv() log.debug('Publish daemon received payload. 
size=%d', len(package)) unpacked_package = salt.payload.unpackage(package) if six.PY3: unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package) payload = unpacked_package['payload'] log.trace('Accepted unpacked package from puller') if self.opts['zmq_filtering']: # if you have a specific topic list, use that if 'topic_lst' in unpacked_package: for topic in unpacked_package['topic_lst']: log.trace('Sending filtered data over publisher %s', pub_uri) # zmq filters are substring match, hash the topic # to avoid collisions htopic = salt.utils.stringutils.to_bytes(hashlib.sha1(topic).hexdigest()) pub_sock.send(htopic, flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Filtered data has been sent') # Syndic broadcast if self.opts.get('order_masters'): log.trace('Sending filtered data to syndic') pub_sock.send(b'syndic', flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Filtered data has been sent to syndic') # otherwise its a broadcast else: # TODO: constants file for "broadcast" log.trace('Sending broadcasted data over publisher %s', pub_uri) pub_sock.send(b'broadcast', flags=zmq.SNDMORE) pub_sock.send(payload) log.trace('Broadcasted data has been sent') else: log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri) pub_sock.send(payload) log.trace('Unfiltered data has been sent') except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc except KeyboardInterrupt: log.trace('Publish daemon caught Keyboard interupt, tearing down') # Cleanly close the sockets if we're shutting down if pub_sock.closed is False: pub_sock.close() if pull_sock.closed is False: pull_sock.close() if context.closed is False: context.term()
def setDirection( self, direction ): """ Sets the direction for this widget to the inputed direction. :param direction | <XPopupWidget.Direction> """ if ( direction == XPopupWidget.Direction.North ): self.setAnchor(XPopupWidget.Anchor.TopCenter) elif ( direction == XPopupWidget.Direction.South ): self.setAnchor(XPopupWidget.Anchor.BottomCenter) elif ( direction == XPopupWidget.Direction.East ): self.setAnchor(XPopupWidget.Anchor.LeftCenter) else: self.setAnchor(XPopupWidget.Anchor.RightCenter)
Sets the direction for this widget to the inputed direction. :param direction | <XPopupWidget.Direction>
Below is the the instruction that describes the task: ### Input: Sets the direction for this widget to the inputed direction. :param direction | <XPopupWidget.Direction> ### Response: def setDirection( self, direction ): """ Sets the direction for this widget to the inputed direction. :param direction | <XPopupWidget.Direction> """ if ( direction == XPopupWidget.Direction.North ): self.setAnchor(XPopupWidget.Anchor.TopCenter) elif ( direction == XPopupWidget.Direction.South ): self.setAnchor(XPopupWidget.Anchor.BottomCenter) elif ( direction == XPopupWidget.Direction.East ): self.setAnchor(XPopupWidget.Anchor.LeftCenter) else: self.setAnchor(XPopupWidget.Anchor.RightCenter)
def constant(interval=1): """Generator for constant intervals. Args: interval: A constant value to yield or an iterable of such values. """ try: itr = iter(interval) except TypeError: itr = itertools.repeat(interval) for val in itr: yield val
Generator for constant intervals. Args: interval: A constant value to yield or an iterable of such values.
Below is the the instruction that describes the task: ### Input: Generator for constant intervals. Args: interval: A constant value to yield or an iterable of such values. ### Response: def constant(interval=1): """Generator for constant intervals. Args: interval: A constant value to yield or an iterable of such values. """ try: itr = iter(interval) except TypeError: itr = itertools.repeat(interval) for val in itr: yield val
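A self-contained restatement of the scalar-versus-iterable dispatch the constant() generator performs, assumed to mirror the function above; itertools.islice is only used to sample the infinite case.

import itertools

def constant(interval=1):
    try:
        itr = iter(interval)                  # an iterable of interval values
    except TypeError:
        itr = itertools.repeat(interval)      # a single constant value, repeated forever
    yield from itr

print(list(itertools.islice(constant(2), 4)))   # [2, 2, 2, 2]
print(list(constant([0.5, 1, 2])))              # [0.5, 1, 2]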
def _cmp_fstruct(self, s1, s2, frac_tol, mask): """ Returns true if a matching exists between s2 and s2 under frac_tol. s2 should be a subset of s1 """ if len(s2) > len(s1): raise ValueError("s1 must be larger than s2") if mask.shape != (len(s2), len(s1)): raise ValueError("mask has incorrect shape") return is_coord_subset_pbc(s2, s1, frac_tol, mask)
Returns true if a matching exists between s2 and s2 under frac_tol. s2 should be a subset of s1
Below is the the instruction that describes the task: ### Input: Returns true if a matching exists between s2 and s2 under frac_tol. s2 should be a subset of s1 ### Response: def _cmp_fstruct(self, s1, s2, frac_tol, mask): """ Returns true if a matching exists between s2 and s2 under frac_tol. s2 should be a subset of s1 """ if len(s2) > len(s1): raise ValueError("s1 must be larger than s2") if mask.shape != (len(s2), len(s1)): raise ValueError("mask has incorrect shape") return is_coord_subset_pbc(s2, s1, frac_tol, mask)
def finalize_options(self): """Post-process options.""" if self.test: print("V%s will publish to the test.pypi.org" % version) elif self.release: print("V%s will publish to the pypi.org" % version)
Post-process options.
Below is the the instruction that describes the task: ### Input: Post-process options. ### Response: def finalize_options(self): """Post-process options.""" if self.test: print("V%s will publish to the test.pypi.org" % version) elif self.release: print("V%s will publish to the pypi.org" % version)
def _get_next(self): """Executes the next iteration of the PRNG evolution process, and returns the result """ self.state = PRNG_A * self.state % PRNG_M return self.state
Executes the next iteration of the PRNG evolution process, and returns the result
Below is the the instruction that describes the task: ### Input: Executes the next iteration of the PRNG evolution process, and returns the result ### Response: def _get_next(self): """Executes the next iteration of the PRNG evolution process, and returns the result """ self.state = PRNG_A * self.state % PRNG_M return self.state
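PRNG_A and PRNG_M are not defined in the entry; the sketch below assumes the classic MINSTD constants (a = 16807, m = 2**31 - 1) purely to make the multiplicative-congruential step concrete.

PRNG_A = 16807          # assumed; the entry does not define PRNG_A
PRNG_M = 2**31 - 1      # assumed; the entry does not define PRNG_M

class LehmerPRNG:
    def __init__(self, seed=1):
        self.state = seed

    def _get_next(self):
        # one evolution step: state <- a * state mod m
        self.state = PRNG_A * self.state % PRNG_M
        return self.state

rng = LehmerPRNG(seed=1)
print([rng._get_next() for _ in range(3)])  # [16807, 282475249, 1622650073]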
def _calculateOverlap(self, inputVector): """ This function determines each column's overlap with the current input vector. The overlap of a column is the number of synapses for that column that are connected (permanence value is greater than '_synPermConnected') to input bits which are turned on. The implementation takes advantage of the SparseBinaryMatrix class to perform this calculation efficiently. Parameters: ---------------------------- :param inputVector: a numpy array of 0's and 1's that comprises the input to the spatial pooler. """ overlaps = numpy.zeros(self._numColumns, dtype=realDType) self._connectedSynapses.rightVecSumAtNZ_fast(inputVector.astype(realDType), overlaps) return overlaps
This function determines each column's overlap with the current input vector. The overlap of a column is the number of synapses for that column that are connected (permanence value is greater than '_synPermConnected') to input bits which are turned on. The implementation takes advantage of the SparseBinaryMatrix class to perform this calculation efficiently. Parameters: ---------------------------- :param inputVector: a numpy array of 0's and 1's that comprises the input to the spatial pooler.
Below is the the instruction that describes the task: ### Input: This function determines each column's overlap with the current input vector. The overlap of a column is the number of synapses for that column that are connected (permanence value is greater than '_synPermConnected') to input bits which are turned on. The implementation takes advantage of the SparseBinaryMatrix class to perform this calculation efficiently. Parameters: ---------------------------- :param inputVector: a numpy array of 0's and 1's that comprises the input to the spatial pooler. ### Response: def _calculateOverlap(self, inputVector): """ This function determines each column's overlap with the current input vector. The overlap of a column is the number of synapses for that column that are connected (permanence value is greater than '_synPermConnected') to input bits which are turned on. The implementation takes advantage of the SparseBinaryMatrix class to perform this calculation efficiently. Parameters: ---------------------------- :param inputVector: a numpy array of 0's and 1's that comprises the input to the spatial pooler. """ overlaps = numpy.zeros(self._numColumns, dtype=realDType) self._connectedSynapses.rightVecSumAtNZ_fast(inputVector.astype(realDType), overlaps) return overlaps
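A dense-numpy analogue of the overlap computation; rightVecSumAtNZ_fast is assumed here to be equivalent to a matrix-vector product against the binary connected-synapse matrix, which is the standard way to describe it outside the SparseBinaryMatrix implementation.

import numpy as np

# connected[c, i] == 1 if column c has a connected synapse to input bit i (assumed dense analogue)
connected = np.array([[1, 0, 1, 1],
                      [0, 1, 1, 0],
                      [1, 1, 0, 0]], dtype=np.float32)
input_vector = np.array([1, 0, 1, 1], dtype=np.float32)

# each column's overlap = number of its connected synapses aligned with active input bits
overlaps = connected.dot(input_vector)
print(overlaps)  # [3. 1. 1.]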
def recent_activity(self, user_id): """Recent activity (actions) for a given user""" M = models # noqa if request.args.get('limit'): limit = int(request.args.get('limit')) else: limit = 1000 qry = ( db.session.query(M.Log, M.Dashboard, M.Slice) .outerjoin( M.Dashboard, M.Dashboard.id == M.Log.dashboard_id, ) .outerjoin( M.Slice, M.Slice.id == M.Log.slice_id, ) .filter( sqla.and_( ~M.Log.action.in_(('queries', 'shortner', 'sql_json')), M.Log.user_id == user_id, ), ) .order_by(M.Log.dttm.desc()) .limit(limit) ) payload = [] for log in qry.all(): item_url = None item_title = None if log.Dashboard: item_url = log.Dashboard.url item_title = log.Dashboard.dashboard_title elif log.Slice: item_url = log.Slice.slice_url item_title = log.Slice.slice_name payload.append({ 'action': log.Log.action, 'item_url': item_url, 'item_title': item_title, 'time': log.Log.dttm, }) return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
Recent activity (actions) for a given user
Below is the the instruction that describes the task: ### Input: Recent activity (actions) for a given user ### Response: def recent_activity(self, user_id): """Recent activity (actions) for a given user""" M = models # noqa if request.args.get('limit'): limit = int(request.args.get('limit')) else: limit = 1000 qry = ( db.session.query(M.Log, M.Dashboard, M.Slice) .outerjoin( M.Dashboard, M.Dashboard.id == M.Log.dashboard_id, ) .outerjoin( M.Slice, M.Slice.id == M.Log.slice_id, ) .filter( sqla.and_( ~M.Log.action.in_(('queries', 'shortner', 'sql_json')), M.Log.user_id == user_id, ), ) .order_by(M.Log.dttm.desc()) .limit(limit) ) payload = [] for log in qry.all(): item_url = None item_title = None if log.Dashboard: item_url = log.Dashboard.url item_title = log.Dashboard.dashboard_title elif log.Slice: item_url = log.Slice.slice_url item_title = log.Slice.slice_name payload.append({ 'action': log.Log.action, 'item_url': item_url, 'item_title': item_title, 'time': log.Log.dttm, }) return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
def set_ss_value(tag, value): """ Setter for data that also work with implicit transfersyntax :param value: the value to set on the tag :param tag: the tag to read """ if tag.VR == 'OB' or tag.VR == 'UN': value = struct.pack('h', value) tag.value = value
Setter for data that also work with implicit transfersyntax :param value: the value to set on the tag :param tag: the tag to read
Below is the the instruction that describes the task: ### Input: Setter for data that also work with implicit transfersyntax :param value: the value to set on the tag :param tag: the tag to read ### Response: def set_ss_value(tag, value): """ Setter for data that also work with implicit transfersyntax :param value: the value to set on the tag :param tag: the tag to read """ if tag.VR == 'OB' or tag.VR == 'UN': value = struct.pack('h', value) tag.value = value
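A small demonstration of what struct.pack('h', value) produces for an SS-style value when the VR is OB/UN; note the format string uses the platform's native byte order, an assumption the snippet makes explicit rather than a claim about the library's intent.

import struct

value = -1024                           # e.g. a typical signed-short pixel-related value
packed = struct.pack('h', value)        # native byte order, signed 16-bit
print(packed)                           # b'\x00\xfc' on a little-endian machine
print(struct.unpack('h', packed)[0])    # -1024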
def getRenderers(filename): """For a given DP, returns a list of renderer ids giving the renderers that support the source file type""" global available_renderers renderers = [] for rdrid, (renderer, module) in available_renderers.items(): try: priority = renderer.canRender(filename) except: print("""Error in renderer: %s.canRender("%s"):""" % (rdrid, filename)) traceback.print_exc() priority = None if priority: renderers.append((priority, rdrid)) # sort by priority renderers.sort(lambda a, b: cmp(a[0], b[0])) # return list of IDs. Note that "none" should always be available and working return [a[1] for a in renderers] or ["link"]
For a given DP, returns a list of renderer ids giving the renderers that support the source file type
Below is the the instruction that describes the task: ### Input: For a given DP, returns a list of renderer ids giving the renderers that support the source file type ### Response: def getRenderers(filename): """For a given DP, returns a list of renderer ids giving the renderers that support the source file type""" global available_renderers renderers = [] for rdrid, (renderer, module) in available_renderers.items(): try: priority = renderer.canRender(filename) except: print("""Error in renderer: %s.canRender("%s"):""" % (rdrid, filename)) traceback.print_exc() priority = None if priority: renderers.append((priority, rdrid)) # sort by priority renderers.sort(lambda a, b: cmp(a[0], b[0])) # return list of IDs. Note that "none" should always be available and working return [a[1] for a in renderers] or ["link"]
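The entry uses the Python 2 cmp-based sort; a hedged Python 3 equivalent of collecting (priority, renderer_id) pairs and ordering them by priority looks like this, with a toy candidates mapping standing in for the real renderer registry.

# toy registry: renderer id -> priority returned by canRender() (None/0 means "cannot render")
candidates = {'image': 10, 'table': None, 'text': 5}

renderers = [(priority, rdrid) for rdrid, priority in candidates.items() if priority]
renderers.sort(key=lambda pair: pair[0])            # Python 3 replacement for the cmp-based sort
ordered_ids = [rdrid for _, rdrid in renderers] or ['link']
print(ordered_ids)  # ['text', 'image']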
def _send_request(self, request): """Establishes connection and returns http response based off of request. :param request: HTTPRequest object :type request: :class:`tincan.http_request.HTTPRequest` :returns: LRS Response object :rtype: :class:`tincan.lrs_response.LRSResponse` """ headers = {"X-Experience-API-Version": self.version} if self.auth is not None: headers["Authorization"] = self.auth headers.update(request.headers) params = request.query_params params = {k: unicode(params[k]).encode('utf-8') for k in params.keys()} params = urllib.urlencode(params) if request.resource.startswith('http'): url = request.resource else: url = self.endpoint url += request.resource parsed = urlparse(url) if parsed.scheme == "https": web_req = httplib.HTTPSConnection(parsed.hostname, parsed.port) else: web_req = httplib.HTTPConnection(parsed.hostname, parsed.port) path = parsed.path if parsed.query or parsed.path: path += "?" if parsed.query: path += parsed.query if params: path += params if hasattr(request, "content") and request.content is not None: web_req.request( method=request.method, url=path, body=request.content, headers=headers, ) else: web_req.request( method=request.method, url=path, headers=headers, ) response = web_req.getresponse() data = response.read() web_req.close() if (200 <= response.status < 300 or (response.status == 404 and hasattr(request, "ignore404") and request.ignore404)): success = True else: success = False return LRSResponse( success=success, request=request, response=response, data=data, )
Establishes connection and returns http response based off of request. :param request: HTTPRequest object :type request: :class:`tincan.http_request.HTTPRequest` :returns: LRS Response object :rtype: :class:`tincan.lrs_response.LRSResponse`
Below is the the instruction that describes the task: ### Input: Establishes connection and returns http response based off of request. :param request: HTTPRequest object :type request: :class:`tincan.http_request.HTTPRequest` :returns: LRS Response object :rtype: :class:`tincan.lrs_response.LRSResponse` ### Response: def _send_request(self, request): """Establishes connection and returns http response based off of request. :param request: HTTPRequest object :type request: :class:`tincan.http_request.HTTPRequest` :returns: LRS Response object :rtype: :class:`tincan.lrs_response.LRSResponse` """ headers = {"X-Experience-API-Version": self.version} if self.auth is not None: headers["Authorization"] = self.auth headers.update(request.headers) params = request.query_params params = {k: unicode(params[k]).encode('utf-8') for k in params.keys()} params = urllib.urlencode(params) if request.resource.startswith('http'): url = request.resource else: url = self.endpoint url += request.resource parsed = urlparse(url) if parsed.scheme == "https": web_req = httplib.HTTPSConnection(parsed.hostname, parsed.port) else: web_req = httplib.HTTPConnection(parsed.hostname, parsed.port) path = parsed.path if parsed.query or parsed.path: path += "?" if parsed.query: path += parsed.query if params: path += params if hasattr(request, "content") and request.content is not None: web_req.request( method=request.method, url=path, body=request.content, headers=headers, ) else: web_req.request( method=request.method, url=path, headers=headers, ) response = web_req.getresponse() data = response.read() web_req.close() if (200 <= response.status < 300 or (response.status == 404 and hasattr(request, "ignore404") and request.ignore404)): success = True else: success = False return LRSResponse( success=success, request=request, response=response, data=data, )
def read(self, entity=None, attrs=None, ignore=None, params=None): """Do not read the ``account_password`` attribute. Work around a bug. For more information, see `Bugzilla #1243036 <https://bugzilla.redhat.com/show_bug.cgi?id=1243036>`_. """ if attrs is None: attrs = self.update_json([]) if ignore is None: ignore = set() ignore.add('account_password') return super(AuthSourceLDAP, self).read(entity, attrs, ignore, params)
Do not read the ``account_password`` attribute. Work around a bug. For more information, see `Bugzilla #1243036 <https://bugzilla.redhat.com/show_bug.cgi?id=1243036>`_.
Below is the the instruction that describes the task: ### Input: Do not read the ``account_password`` attribute. Work around a bug. For more information, see `Bugzilla #1243036 <https://bugzilla.redhat.com/show_bug.cgi?id=1243036>`_. ### Response: def read(self, entity=None, attrs=None, ignore=None, params=None): """Do not read the ``account_password`` attribute. Work around a bug. For more information, see `Bugzilla #1243036 <https://bugzilla.redhat.com/show_bug.cgi?id=1243036>`_. """ if attrs is None: attrs = self.update_json([]) if ignore is None: ignore = set() ignore.add('account_password') return super(AuthSourceLDAP, self).read(entity, attrs, ignore, params)
def send_frame(self, frame): """ Sends a frame to the other end of the connection. """ self._sendbuf += self._send_streamify(frame) self._sendbuf_event.set()
Sends a frame to the other end of the connection.
Below is the the instruction that describes the task: ### Input: Sends a frame to the other end of the connection. ### Response: def send_frame(self, frame): """ Sends a frame to the other end of the connection. """ self._sendbuf += self._send_streamify(frame) self._sendbuf_event.set()
def tempo_account_delete_customer_by_id(self, customer_id=1): """ Delete an Attribute. Caller must have Manage Account Permission. Attribute can be a Category or Customer. :param customer_id: id of Customer record :return: Customer info """ url = 'rest/tempo-accounts/1/customer/{id}'.format(id=customer_id) return self.delete(url)
Delete an Attribute. Caller must have Manage Account Permission. Attribute can be a Category or Customer. :param customer_id: id of Customer record :return: Customer info
Below is the the instruction that describes the task: ### Input: Delete an Attribute. Caller must have Manage Account Permission. Attribute can be a Category or Customer. :param customer_id: id of Customer record :return: Customer info ### Response: def tempo_account_delete_customer_by_id(self, customer_id=1): """ Delete an Attribute. Caller must have Manage Account Permission. Attribute can be a Category or Customer. :param customer_id: id of Customer record :return: Customer info """ url = 'rest/tempo-accounts/1/customer/{id}'.format(id=customer_id) return self.delete(url)
async def set_setback_temp(self, sb_temp, timeout=OTGW_DEFAULT_TIMEOUT): """ Configure the setback temperature to use in combination with GPIO functions HOME (5) and AWAY (6). Return the new setback temperature, or None on failure. This method is a coroutine """ cmd = OTGW_CMD_SETBACK status = {} ret = await self._wait_for_cmd(cmd, sb_temp, timeout) if ret is None: return ret = float(ret) status[OTGW_SB_TEMP] = ret self._update_status(status) return ret
Configure the setback temperature to use in combination with GPIO functions HOME (5) and AWAY (6). Return the new setback temperature, or None on failure. This method is a coroutine
Below is the the instruction that describes the task: ### Input: Configure the setback temperature to use in combination with GPIO functions HOME (5) and AWAY (6). Return the new setback temperature, or None on failure. This method is a coroutine ### Response: async def set_setback_temp(self, sb_temp, timeout=OTGW_DEFAULT_TIMEOUT): """ Configure the setback temperature to use in combination with GPIO functions HOME (5) and AWAY (6). Return the new setback temperature, or None on failure. This method is a coroutine """ cmd = OTGW_CMD_SETBACK status = {} ret = await self._wait_for_cmd(cmd, sb_temp, timeout) if ret is None: return ret = float(ret) status[OTGW_SB_TEMP] = ret self._update_status(status) return ret
def to_dict(self): # cls, self): """Convert Docpie into a JSONlizable dict. Use it in this way: pie = Docpie(__doc__) json.dumps(pie.convert_2_dict()) Note the `extra` info will be lost if you costomize that, because a function is not JSONlizable. You can use `set_config(extra={...})` to set it back. """ config = { 'stdopt': self.stdopt, 'attachopt': self.attachopt, 'attachvalue': self.attachvalue, 'auto2dashes': self.auto2dashes, 'case_sensitive': self.case_sensitive, 'namedoptions': self.namedoptions, 'appearedonly': self.appeared_only, 'optionsfirst': self.options_first, 'option_name': self.option_name, 'usage_name': self.usage_name, 'name': self.name, 'help': self.help, 'version': self.version } text = { 'doc': self.doc, 'usage_text': self.usage_text, 'option_sections': self.option_sections, } # option = [convert_2_dict(x) for x in self.options] option = {} for title, options in self.options.items(): option[title] = [convert_2_dict(x) for x in options] usage = [convert_2_dict(x) for x in self.usages] return { '__version__': self._version, '__class__': 'Docpie', '__config__': config, '__text__': text, 'option': option, 'usage': usage, 'option_names': [list(x) for x in self.opt_names], 'opt_names_required_max_args': self.opt_names_required_max_args }
Convert Docpie into a JSONlizable dict. Use it in this way: pie = Docpie(__doc__) json.dumps(pie.convert_2_dict()) Note the `extra` info will be lost if you costomize that, because a function is not JSONlizable. You can use `set_config(extra={...})` to set it back.
Below is the the instruction that describes the task: ### Input: Convert Docpie into a JSONlizable dict. Use it in this way: pie = Docpie(__doc__) json.dumps(pie.convert_2_dict()) Note the `extra` info will be lost if you costomize that, because a function is not JSONlizable. You can use `set_config(extra={...})` to set it back. ### Response: def to_dict(self): # cls, self): """Convert Docpie into a JSONlizable dict. Use it in this way: pie = Docpie(__doc__) json.dumps(pie.convert_2_dict()) Note the `extra` info will be lost if you costomize that, because a function is not JSONlizable. You can use `set_config(extra={...})` to set it back. """ config = { 'stdopt': self.stdopt, 'attachopt': self.attachopt, 'attachvalue': self.attachvalue, 'auto2dashes': self.auto2dashes, 'case_sensitive': self.case_sensitive, 'namedoptions': self.namedoptions, 'appearedonly': self.appeared_only, 'optionsfirst': self.options_first, 'option_name': self.option_name, 'usage_name': self.usage_name, 'name': self.name, 'help': self.help, 'version': self.version } text = { 'doc': self.doc, 'usage_text': self.usage_text, 'option_sections': self.option_sections, } # option = [convert_2_dict(x) for x in self.options] option = {} for title, options in self.options.items(): option[title] = [convert_2_dict(x) for x in options] usage = [convert_2_dict(x) for x in self.usages] return { '__version__': self._version, '__class__': 'Docpie', '__config__': config, '__text__': text, 'option': option, 'usage': usage, 'option_names': [list(x) for x in self.opt_names], 'opt_names_required_max_args': self.opt_names_required_max_args }
def _quit(self, *args): """ quit crash """ self.logger.warn('Bye!') sys.exit(self.exit())
quit crash
Below is the the instruction that describes the task: ### Input: quit crash ### Response: def _quit(self, *args): """ quit crash """ self.logger.warn('Bye!') sys.exit(self.exit())
def get_or_add_media_part(self, media): """Return a |MediaPart| object containing the media in *media*. If this package already contains a media part for the same bytestream, that instance is returned, otherwise a new media part is created. """ media_part = self._find_by_sha1(media.sha1) if media_part is None: media_part = MediaPart.new(self._package, media) return media_part
Return a |MediaPart| object containing the media in *media*. If this package already contains a media part for the same bytestream, that instance is returned, otherwise a new media part is created.
Below is the the instruction that describes the task: ### Input: Return a |MediaPart| object containing the media in *media*. If this package already contains a media part for the same bytestream, that instance is returned, otherwise a new media part is created. ### Response: def get_or_add_media_part(self, media): """Return a |MediaPart| object containing the media in *media*. If this package already contains a media part for the same bytestream, that instance is returned, otherwise a new media part is created. """ media_part = self._find_by_sha1(media.sha1) if media_part is None: media_part = MediaPart.new(self._package, media) return media_part
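The "reuse the part if the same bytes were already added" idea can be sketched with a plain dict keyed by SHA-1, leaving out the docx package machinery; MediaRegistry and its stored dicts are illustrative stand-ins for MediaPart objects.

import hashlib

class MediaRegistry:
    def __init__(self):
        self._parts_by_sha1 = {}

    def get_or_add(self, blob):
        """Return the stored part for this bytestream, creating it only once."""
        sha1 = hashlib.sha1(blob).hexdigest()
        if sha1 not in self._parts_by_sha1:
            # stand-in for MediaPart.new(package, media)
            self._parts_by_sha1[sha1] = {'sha1': sha1, 'size': len(blob)}
        return self._parts_by_sha1[sha1]

reg = MediaRegistry()
a = reg.get_or_add(b'PNG-bytes...')
b = reg.get_or_add(b'PNG-bytes...')
print(a is b)  # True: the second call reuses the existing part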
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]: """Read and parse yaml file.""" logger.debug("Input file: %s", filename) with open(filename, "r") as stream: structure = yaml.safe_load(stream) return structure
Read and parse yaml file.
Below is the the instruction that describes the task: ### Input: Read and parse yaml file. ### Response: def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]: """Read and parse yaml file.""" logger.debug("Input file: %s", filename) with open(filename, "r") as stream: structure = yaml.safe_load(stream) return structure
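Assuming PyYAML is installed, the same read-and-parse step can be exercised against an in-memory document rather than a file on disk; yaml.safe_load accepts a string as well as a stream.

import yaml  # PyYAML, assumed to be installed

text = """
name: experiment-1
steps:
  - minimize
  - equilibrate
"""
structure = yaml.safe_load(text)
print(structure)  # {'name': 'experiment-1', 'steps': ['minimize', 'equilibrate']}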
def __push_params(self, tid, params): """ Remembers the arguments tuple for the last call to the hooked function from this thread. @type tid: int @param tid: Thread global ID. @type params: tuple( arg, arg, arg... ) @param params: Tuple of arguments. """ stack = self.__paramStack.get( tid, [] ) stack.append(params) self.__paramStack[tid] = stack
Remembers the arguments tuple for the last call to the hooked function from this thread. @type tid: int @param tid: Thread global ID. @type params: tuple( arg, arg, arg... ) @param params: Tuple of arguments.
Below is the the instruction that describes the task: ### Input: Remembers the arguments tuple for the last call to the hooked function from this thread. @type tid: int @param tid: Thread global ID. @type params: tuple( arg, arg, arg... ) @param params: Tuple of arguments. ### Response: def __push_params(self, tid, params): """ Remembers the arguments tuple for the last call to the hooked function from this thread. @type tid: int @param tid: Thread global ID. @type params: tuple( arg, arg, arg... ) @param params: Tuple of arguments. """ stack = self.__paramStack.get( tid, [] ) stack.append(params) self.__paramStack[tid] = stack
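A self-contained sketch of the same bookkeeping: one argument stack per thread id, appended to on each call; threading.get_ident() stands in for the debugger-supplied global thread id used above.

import threading

param_stack = {}

def push_params(tid, params):
    # remember this call's argument tuple on the stack belonging to tid
    param_stack.setdefault(tid, []).append(params)

def worker(x):
    push_params(threading.get_ident(), (x,))

threads = [threading.Thread(target=worker, args=(i,)) for i in range(3)]
for t in threads: t.start()
for t in threads: t.join()
print(sum(len(s) for s in param_stack.values()))  # 3 pushes recorded across the per-thread stacks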
def get_popular_decks(self, **params: keys): """Get a list of most queried decks \*\*keys: Optional[list] = None Filter which keys should be included in the response \*\*exclude: Optional[list] = None Filter which keys should be excluded from the response \*\*max: Optional[int] = None Limit the number of items returned in the response \*\*page: Optional[int] = None Works with max, the zero-based page of the items \*\*timeout: Optional[int] = None Custom timeout that overwrites Client.timeout """ url = self.api.POPULAR + '/decks' return self._get_model(url, **params)
Get a list of most queried decks \*\*keys: Optional[list] = None Filter which keys should be included in the response \*\*exclude: Optional[list] = None Filter which keys should be excluded from the response \*\*max: Optional[int] = None Limit the number of items returned in the response \*\*page: Optional[int] = None Works with max, the zero-based page of the items \*\*timeout: Optional[int] = None Custom timeout that overwrites Client.timeout
Below is the the instruction that describes the task: ### Input: Get a list of most queried decks \*\*keys: Optional[list] = None Filter which keys should be included in the response \*\*exclude: Optional[list] = None Filter which keys should be excluded from the response \*\*max: Optional[int] = None Limit the number of items returned in the response \*\*page: Optional[int] = None Works with max, the zero-based page of the items \*\*timeout: Optional[int] = None Custom timeout that overwrites Client.timeout ### Response: def get_popular_decks(self, **params: keys): """Get a list of most queried decks \*\*keys: Optional[list] = None Filter which keys should be included in the response \*\*exclude: Optional[list] = None Filter which keys should be excluded from the response \*\*max: Optional[int] = None Limit the number of items returned in the response \*\*page: Optional[int] = None Works with max, the zero-based page of the items \*\*timeout: Optional[int] = None Custom timeout that overwrites Client.timeout """ url = self.api.POPULAR + '/decks' return self._get_model(url, **params)
def edit_message_text(text, chat_id=None, message_id=None, inline_message_id=None, parse_mode=None, disable_web_page_preview=None, reply_markup=None, **kwargs): """ Use this method to edit text messages sent by the bot or via the bot (for inline bots). :param text: New text of the message :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) the target channel (in the format @channelusername) :param message_id: Required if inline_message_id is not specified. Unique identifier of the sent message :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param disable_web_page_preview: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param reply_markup: A JSON-serialized object for an inline keyboard. :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type text: str :type chat_id: str or int :type message_id: int :type inline_message_id: str :type parse_mode: str :type disable_web_page_preview: bool :type reply_markup: InlineKeyboardMarkup :returns: On success, the edited Message is returned. :rtype: Message """ if not chat_id and not message_id and not inline_message_id: raise ValueError("Must specify chat_id and message_id or inline_message_id") if (chat_id and not message_id) or (not chat_id and message_id): raise ValueError("Must specify chat_id and message_id together") # required args params = dict( text=text, ) # optional args params.update( _clean_params( chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, reply_markup=reply_markup ) ) return TelegramBotRPCRequest('editMessageText', params=params, on_result=Message.from_result, **kwargs)
Use this method to edit text messages sent by the bot or via the bot (for inline bots). :param text: New text of the message :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) the target channel (in the format @channelusername) :param message_id: Required if inline_message_id is not specified. Unique identifier of the sent message :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param disable_web_page_preview: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param reply_markup: A JSON-serialized object for an inline keyboard. :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type text: str :type chat_id: str or int :type message_id: int :type inline_message_id: str :type parse_mode: str :type disable_web_page_preview: bool :type reply_markup: InlineKeyboardMarkup :returns: On success, the edited Message is returned. :rtype: Message
Below is the the instruction that describes the task: ### Input: Use this method to edit text messages sent by the bot or via the bot (for inline bots). :param text: New text of the message :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) the target channel (in the format @channelusername) :param message_id: Required if inline_message_id is not specified. Unique identifier of the sent message :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param disable_web_page_preview: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param reply_markup: A JSON-serialized object for an inline keyboard. :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type text: str :type chat_id: str or int :type message_id: int :type inline_message_id: str :type parse_mode: str :type disable_web_page_preview: bool :type reply_markup: InlineKeyboardMarkup :returns: On success, the edited Message is returned. :rtype: Message ### Response: def edit_message_text(text, chat_id=None, message_id=None, inline_message_id=None, parse_mode=None, disable_web_page_preview=None, reply_markup=None, **kwargs): """ Use this method to edit text messages sent by the bot or via the bot (for inline bots). :param text: New text of the message :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) the target channel (in the format @channelusername) :param message_id: Required if inline_message_id is not specified. Unique identifier of the sent message :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param disable_web_page_preview: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :param reply_markup: A JSON-serialized object for an inline keyboard. :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type text: str :type chat_id: str or int :type message_id: int :type inline_message_id: str :type parse_mode: str :type disable_web_page_preview: bool :type reply_markup: InlineKeyboardMarkup :returns: On success, the edited Message is returned. :rtype: Message """ if not chat_id and not message_id and not inline_message_id: raise ValueError("Must specify chat_id and message_id or inline_message_id") if (chat_id and not message_id) or (not chat_id and message_id): raise ValueError("Must specify chat_id and message_id together") # required args params = dict( text=text, ) # optional args params.update( _clean_params( chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, reply_markup=reply_markup ) ) return TelegramBotRPCRequest('editMessageText', params=params, on_result=Message.from_result, **kwargs)
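_clean_params is not shown in the entry; a reasonable assumption is that it simply drops keyword arguments whose value is None before they are merged into the request params, which the sketch below illustrates with a hypothetical clean_params helper.

def clean_params(**kwargs):
    """Assumed behaviour of _clean_params: drop entries whose value is None."""
    return {k: v for k, v in kwargs.items() if v is not None}

params = dict(text='hello')
params.update(clean_params(chat_id=12345, message_id=678,
                           inline_message_id=None, parse_mode=None))
print(params)  # {'text': 'hello', 'chat_id': 12345, 'message_id': 678}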
def append_vobject(self, vcard, filename=None): """Appends an address to the Abook addressbook vcard -- vCard to append filename -- unused return the new UID of the appended vcard """ book = ConfigParser(default_section='format') with self._lock: book.read(self._filename) section = str(max([-1] + [int(k) for k in book.sections()]) + 1) Abook.to_abook(vcard, section, book, self._filename) with open(self._filename, 'w') as fp: book.write(fp, False) return Abook._gen_uid(book[section])
Appends an address to the Abook addressbook vcard -- vCard to append filename -- unused return the new UID of the appended vcard
Below is the the instruction that describes the task: ### Input: Appends an address to the Abook addressbook vcard -- vCard to append filename -- unused return the new UID of the appended vcard ### Response: def append_vobject(self, vcard, filename=None): """Appends an address to the Abook addressbook vcard -- vCard to append filename -- unused return the new UID of the appended vcard """ book = ConfigParser(default_section='format') with self._lock: book.read(self._filename) section = str(max([-1] + [int(k) for k in book.sections()]) + 1) Abook.to_abook(vcard, section, book, self._filename) with open(self._filename, 'w') as fp: book.write(fp, False) return Abook._gen_uid(book[section])
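The "next free numeric section" computation can be exercised on its own with configparser; the max([-1] + ...) + 1 idiom yields '0' for an empty book and the next integer otherwise, and the two sample entries below are illustrative only.

from configparser import ConfigParser

book = ConfigParser(default_section='format')
book.read_string("[0]\nname=Alice\n\n[1]\nname=Bob\n")

# pick the smallest unused numeric section name
next_section = str(max([-1] + [int(k) for k in book.sections()]) + 1)
print(next_section)  # '2'; on an empty book the same expression yields '0'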