Dataset columns: code (string, length 75 to 104k), docstring (string, length 1 to 46.9k), text (string, length 164 to 112k).
def _draw_polygons(self, feature, bg, colour, extent, polygons, xo, yo):
    """Draw a set of polygons from a vector tile."""
    coords = []
    for polygon in polygons:
        coords.append([self._scale_coords(x, y, extent, xo, yo) for x, y in polygon])
    # Polygons are expensive to draw and the buildings layer is huge - so we convert to
    # lines in order to process updates fast enough to animate.
    if "type" in feature["properties"] and "building" in feature["properties"]["type"]:
        for line in coords:
            self._draw_lines_internal(line, colour, bg)
    else:
        self._screen.fill_polygon(coords, colour=colour, bg=bg)
Draw a set of polygons from a vector tile.
def Define_TreeTable(self, heads, heads2=None):
    '''
    Define a TreeTable with a heading row and optionally a second heading row.
    '''
    display_heads = []
    display_heads.append(tuple(heads[2:]))
    self.tree_table = TreeTable()
    self.tree_table.append_from_list(display_heads, fill_title=True)
    if heads2 is not None:
        heads2_color = heads2[1]
        row_widget = gui.TableRow()
        for index, field in enumerate(heads2[2:]):
            row_item = gui.TableItem(text=field, style={'background-color': heads2_color})
            row_widget.append(row_item, field)
        self.tree_table.append(row_widget, heads2[0])
    self.wid.append(self.tree_table)
Define a TreeTable with a heading row and optionally a second heading row.
def interprocess_locked(path):
    """Acquires & releases an interprocess lock around a call into the decorated function."""
    lock = InterProcessLock(path)

    def decorator(f):

        @six.wraps(f)
        def wrapper(*args, **kwargs):
            with lock:
                return f(*args, **kwargs)

        return wrapper

    return decorator
Acquires & releases an interprocess lock around a call into the decorated function.
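A usage sketch (not part of the dataset row): the decorator above wraps a function so that only one process at a time can run it. The example assumes the decorator is exposed by the fasteners library, and the lock-file path is invented for illustration.

import fasteners

# Hypothetical lock file; any writable path works.
@fasteners.interprocess_locked('/tmp/report_job.lock')
def generate_report():
    # Only one process can be inside this body at a time.
    with open('/tmp/report.txt', 'a') as f:
        f.write('one writer at a time\n')

generate_report()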
def _merge_fields(a, b):
    """Merge two lists of fields. Fields in `b` override fields in `a`.
    Fields in `a` are output first.
    """
    a_names = set(x[0] for x in a)
    b_names = set(x[0] for x in b)
    a_keep = a_names - b_names
    fields = []
    for name, field in a:
        if name in a_keep:
            fields.append((name, field))
    fields.extend(b)
    return fields
Merge two lists of fields. Fields in `b` override fields in `a`. Fields in `a` are output first.
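A minimal self-contained sketch of the merge semantics described above, using invented field names; fields unique to `a` come out first, and `b` wins on name clashes.

a = [('id', 'int'), ('name', 'str'), ('created', 'datetime')]
b = [('name', 'unicode'), ('tags', 'list')]

merged = _merge_fields(a, b)
print(merged)
# [('id', 'int'), ('created', 'datetime'), ('name', 'unicode'), ('tags', 'list')]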
def set_params_value(self, *params):
    """
    This interface is used to set parameter values for a function in an ABI file.
    """
    if len(params) != len(self.parameters):
        raise Exception("parameter error")
    temp = self.parameters
    self.parameters = []
    for i in range(len(params)):
        self.parameters.append(Parameter(temp[i]['name'], temp[i]['type']))
        self.parameters[i].set_value(params[i])
This interface is used to set parameter values for a function in an ABI file.
def get_all_loopbacks(engine):
    """
    Get all loopback interfaces for a given engine
    """
    data = []
    if 'fw_cluster' in engine.type:
        for cvi in engine.data.get('loopback_cluster_virtual_interface', []):
            data.append(LoopbackClusterInterface(cvi, engine))
    for node in engine.nodes:
        for lb in node.data.get('loopback_node_dedicated_interface', []):
            data.append(LoopbackInterface(lb, engine))
    return data
Get all loopback interfaces for a given engine
def run(command, encoding=None, decode=True, cwd=None):
    """Run a command [cmd, arg1, arg2, ...].

    Returns the output (stdout + stderr).

    Raises CommandFailed in cases of error.
    """
    if not encoding:
        encoding = locale.getpreferredencoding()
    try:
        with open(os.devnull, 'rb') as devnull:
            pipe = subprocess.Popen(command, stdin=devnull, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, cwd=cwd)
    except OSError as e:
        raise Failure("could not run %s: %s" % (command, e))
    output = pipe.communicate()[0]
    if decode:
        output = output.decode(encoding)
    status = pipe.wait()
    if status != 0:
        raise CommandFailed(command, status, output)
    return output
Run a command [cmd, arg1, arg2, ...]. Returns the output (stdout + stderr). Raises CommandFailed in cases of error.
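A usage sketch for the helper above, assuming the `Failure` and `CommandFailed` exception classes it references are defined in the same module; the commands are arbitrary Unix examples.

# Commands are passed as argument lists, so no shell quoting is involved.
output = run(['echo', 'hello world'])
print(output.strip())  # hello world

try:
    run(['false'])  # exits non-zero
except CommandFailed as e:
    print('command failed:', e)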
def guest_reboot(self, userid):
    """Reboot a guest vm."""
    LOG.info("Begin to reboot vm %s", userid)
    self._smtclient.guest_reboot(userid)
    LOG.info("Complete reboot vm %s", userid)
Reboot a guest vm.
def currentPixmapRect(self):
    """
    Returns the rect that defines the boundary for the current pixmap
    based on the size of the button and the size of the pixmap.

    :return     <QtCore.QRect>
    """
    pixmap = self.currentPixmap()
    rect = self.rect()
    size = pixmap.size()
    x = rect.center().x() - (size.width() / 2.0)
    y = rect.center().y() - (size.height() / 2.0)
    return QtCore.QRect(x, y, size.width(), size.height())
Returns the rect that defines the boundary for the current pixmap based on the size of the button and the size of the pixmap. :return <QtCore.QRect>
def getp(self, name):
    """
    Get the named parameter.

    Parameters
    ----------
    name : string
        The parameter name.

    Returns
    -------
    param :
        The parameter object.
    """
    name = self._mapping.get(name, name)
    return self.params[name]
Get the named parameter. Parameters ---------- name : string The parameter name. Returns ------- param : The parameter object.
def update(self, volume, display_name=None, display_description=None):
    """
    Update the specified values on the specified volume. You may specify
    one or more values to update. If no values are specified as non-None,
    the call is a no-op; no exception will be raised.
    """
    return volume.update(display_name=display_name,
                         display_description=display_description)
Update the specified values on the specified volume. You may specify one or more values to update. If no values are specified as non-None, the call is a no-op; no exception will be raised.
def build_year(self, dt):
    """
    Build the page for the provided year.
    """
    self.year = str(dt.year)
    logger.debug("Building %s" % self.year)
    self.request = self.create_request(self.get_url())
    target_path = self.get_build_path()
    self.build_file(target_path, self.get_content())
Build the page for the provided year.
def mgz_to_nifti(filename, prefix=None, gzip=True):
    '''Convert ``filename`` to a NIFTI file using ``mri_convert``'''
    setup_freesurfer()
    if prefix is None:
        prefix = nl.prefix(filename) + '.nii'
    if gzip and not prefix.endswith('.gz'):
        prefix += '.gz'
    nl.run([os.path.join(freesurfer_home, 'bin', 'mri_convert'), filename, prefix],
           products=prefix)
Convert ``filename`` to a NIFTI file using ``mri_convert``
def _set_cfg(self, v, load=False):
    """
    Setter method for cfg, mapped from YANG variable /zoning/defined_configuration/cfg (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cfg is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cfg() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGListType("cfg_name", cfg.cfg, yang_name="cfg", rest_name="cfg", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cfg-name', extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}), is_container='list', yang_name="cfg", rest_name="cfg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """cfg must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("cfg_name",cfg.cfg, yang_name="cfg", rest_name="cfg", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cfg-name', extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}), is_container='list', yang_name="cfg", rest_name="cfg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)""",
        })

    self.__cfg = t
    if hasattr(self, '_set'):
        self._set()
Setter method for cfg, mapped from YANG variable /zoning/defined_configuration/cfg (list) If this variable is read-only (config: false) in the source YANG file, then _set_cfg is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cfg() directly.
def _create_query(node, context):
    """Create a query from a SqlNode.

    Args:
        node: SqlNode, the current node.
        context: CompilationContext, global compilation state and metadata.

    Returns:
        Selectable, selectable of the generated query.
    """
    visited_nodes = [node]
    output_columns = _get_output_columns(visited_nodes, context)
    filters = _get_filters(visited_nodes, context)
    selectable = sql_context_helpers.get_node_selectable(node, context)
    query = select(output_columns).select_from(selectable).where(and_(*filters))
    return query
Create a query from a SqlNode. Args: node: SqlNode, the current node. context: CompilationContext, global compilation state and metadata. Returns: Selectable, selectable of the generated query.
def as_number(self):
    """
    >>> round(SummableVersion('1.9.3').as_number(), 12)
    1.93
    """
    def combine(subver, ver):
        return subver / 10 + ver
    return reduce(combine, reversed(self.version))
>>> round(SummableVersion('1.9.3').as_number(), 12) 1.93
def help_cli_search(self):
    """ Help for Workbench CLI Search """
    help = '%sSearch: %s returns sample_sets, a sample_set is a set/list of md5s.' % (color.Yellow, color.Green)
    help += '\n\n\t%sSearch for all samples in the database that are known bad pe files,' % (color.Green)
    help += '\n\t%sthis command returns the sample_set containing the matching items' % (color.Green)
    help += '\n\t%s> my_bad_exes = search([\'bad\', \'exe\'])' % (color.LightBlue)
    help += '\n\n\t%sRun workers on this sample_set:' % (color.Green)
    help += '\n\t%s> pe_outputs = pe_features(my_bad_exes) %s' % (color.LightBlue, color.Normal)
    help += '\n\n\t%sLoop on the generator (or make a DataFrame see >help dataframe)' % (color.Green)
    help += '\n\t%s> for output in pe_outputs: %s' % (color.LightBlue, color.Normal)
    help += '\n\t\t%s print output %s' % (color.LightBlue, color.Normal)
    return help
Help for Workbench CLI Search
def gnuplot_2d(x, y, filename, title='', x_label='', y_label=''):
    '''
    Function to produce a general 2D plot.

    Args:
        x (list): x points.
        y (list): y points.
        filename (str): Filename of the output image.
        title (str): Title of the plot. Default is '' (no title).
        x_label (str): x-axis label.
        y_label (str): y-axis label.
    '''
    _, ext = os.path.splitext(filename)
    if ext != '.png':
        filename += '.png'

    gnuplot_cmds = \
    '''
    set datafile separator ","
    set term pngcairo size 30cm,25cm
    set out filename

    unset key
    set border lw 1.5
    set grid lt -1 lc rgb "gray80"

    set title title
    set xlabel x_label
    set ylabel y_label

    plot filename_data u 1:2 w lp pt 6 ps 0.5
    '''
    scr = _GnuplotScriptTemp(gnuplot_cmds)
    data = _GnuplotDataTemp(x, y)
    args_dict = {
        'filename': filename,
        'filename_data': data.name,
        'title': title,
        'x_label': x_label,
        'y_label': y_label
    }
    gnuplot(scr.name, args_dict)
Function to produce a general 2D plot. Args: x (list): x points. y (list): y points. filename (str): Filename of the output image. title (str): Title of the plot. Default is '' (no title). x_label (str): x-axis label. y_label (str): y-axis label.
def _get_status_tokens(self):
    " The tokens for the status bar. "
    result = []

    # Display panes.
    for i, w in enumerate(self.pymux.arrangement.windows):
        if i > 0:
            result.append(('', ' '))

        if w == self.pymux.arrangement.get_active_window():
            style = 'class:window.current'
            format_str = self.pymux.window_status_current_format
        else:
            style = 'class:window'
            format_str = self.pymux.window_status_format

        result.append((
            style,
            format_pymux_string(self.pymux, format_str, window=w),
            self._create_select_window_handler(w)))

    return result
The tokens for the status bar.
def _build_contract_creation_tx_with_valid_signature(self, tx_dict: Dict[str, None], s: int) -> Transaction:
    """
    Use pyethereum `Transaction` to generate valid tx using a random signature
    :param tx_dict: Web3 tx dictionary
    :param s: Signature s value
    :return: PyEthereum creation tx for the proxy contract
    """
    zero_address = HexBytes('0x' + '0' * 40)
    f_address = HexBytes('0x' + 'f' * 40)
    nonce = tx_dict['nonce']
    gas_price = tx_dict['gasPrice']
    gas = tx_dict['gas']
    to = tx_dict.get('to', b'')  # Contract creation should always have `to` empty
    value = tx_dict['value']
    data = tx_dict['data']
    for _ in range(100):
        try:
            v, r = self.find_valid_random_signature(s)
            contract_creation_tx = Transaction(nonce, gas_price, gas, to, value, HexBytes(data), v=v, r=r, s=s)
            sender_address = contract_creation_tx.sender
            contract_address = contract_creation_tx.creates
            if sender_address in (zero_address, f_address) or contract_address in (zero_address, f_address):
                raise InvalidTransaction
            return contract_creation_tx
        except InvalidTransaction:
            pass
    raise ValueError('Valid signature not found with s=%d', s)
Use pyethereum `Transaction` to generate valid tx using a random signature :param tx_dict: Web3 tx dictionary :param s: Signature s value :return: PyEthereum creation tx for the proxy contract
def getAllConfig(self, fmt='json'):
    """ return all element configurations as a json string, which could be
    further processed by the beamline.Lattice class

    :param fmt: 'json' (default) or 'dict'
    """
    for e in self.getCtrlConf(msgout=False):
        self._lattice_confdict.update(e.dumpConfig(type='simu'))
    self._lattice_confdict.update(self._lattice.dumpConfig())
    if fmt == 'json':
        return json.dumps(self._lattice_confdict)
    else:
        return self._lattice_confdict
return all element configurations as a json string, which could be further processed by the beamline.Lattice class :param fmt: 'json' (default) or 'dict'
def kwonly_args(kws, required, withdefaults=(), leftovers=False):
    """ Based on the snippet by Eric Snow
        http://code.activestate.com/recipes/577940
        SPDX-License-Identifier: MIT
    """
    if hasattr(withdefaults, 'items'):  # allows for OrderedDict to be passed
        withdefaults = withdefaults.items()
    kwonly = []

    # extract the required keyword-only arguments
    missing = []
    for name in required:
        if name not in kws:
            missing.append(name)
        else:
            kwonly.append(kws.pop(name))

    # validate required keyword-only arguments
    if missing:
        if len(missing) > 2:
            end = 's: %s, and %s' % (', '.join(missing[:-1]), missing[-1])
        elif len(missing) == 2:
            end = 's: %s and %s' % tuple(missing)
        else:
            end = ': %s' % tuple(missing)
        msg = 'missing %s required keyword-only argument%s'
        raise TypeError(msg % (len(missing), end))

    # handle the withdefaults
    for name, value in withdefaults:
        if name not in kws:
            kwonly.append(value)
        else:
            kwonly.append(kws.pop(name))

    # handle any leftovers
    if not leftovers and kws:
        msg = "got an unexpected keyword argument '%s'"
        raise TypeError(msg % (kws.keys()[0]))

    return [kws] + kwonly
Based on the snippet by Eric Snow http://code.activestate.com/recipes/577940 SPDX-License-Identifier: MIT
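A self-contained usage sketch of the helper above, emulating keyword-only arguments in Python 2 style code; the function and argument names are invented.

def connect(host, **kws):
    # 'timeout' is required; 'retries' defaults to 3; unknown kwargs raise TypeError.
    kws, timeout, retries = kwonly_args(kws, ['timeout'], [('retries', 3)])
    return host, timeout, retries

print(connect('example.org', timeout=10))              # ('example.org', 10, 3)
print(connect('example.org', timeout=10, retries=5))   # ('example.org', 10, 5)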
def replace(dict, line):
    """
    Find and replace the special words according to the dictionary.

    Parameters
    ==========
    dict : Dictionary
        A dictionary derived from a yaml file.
        Source language as keys and the target language as values.
    line : String
        A string to be processed.
    """
    words = line.split()
    new_line = ""
    for word in words:
        fst = word[0]
        last = word[-1]
        # Check if the word ends with a punctuation
        if last == "," or last == ";" or last == ".":
            clean_word = word[0:-1]
            last = last + " "
        elif last == "]":
            clean_word = word[0:-1]
        else:
            clean_word = word
            last = " "
        # Check if the word starts with "["
        if fst == "[":
            clean_word = clean_word[1:]
        else:
            clean_word = clean_word
            fst = ""
        find = dict.get(clean_word)
        if find == None:
            new_line = new_line + fst + str(clean_word) + last
        else:
            new_line = new_line + fst + str(find) + last
    return new_line
Find and replace the special words according to the dictionary. Parameters ========== dict : Dictionary A dictionary derived from a yaml file. Source language as keys and the target language as values. line : String A string to be processed.
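A small self-contained sketch of the word-replacement behaviour described above; the glossary contents are invented.

glossary = {'cat': 'chat', 'dog': 'chien'}
print(replace(glossary, 'the cat, the dog and the [cat]'))
# the chat, the chien and the [chat]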
def _setup_piddir(self):
    """Create the directory for the PID file if necessary."""
    if self.pidfile is None:
        return
    piddir = os.path.dirname(self.pidfile)
    if not os.path.isdir(piddir):
        # Create the directory with sensible mode and ownership
        os.makedirs(piddir, 0o777 & ~self.umask)
        os.chown(piddir, self.uid, self.gid)
Create the directory for the PID file if necessary.
def wait(self, till=None):
    """
    THE ASSUMPTION IS wait() WILL ALWAYS RETURN WITH THE LOCK ACQUIRED
    :param till: WHEN TO GIVE UP WAITING FOR ANOTHER THREAD TO SIGNAL
    :return: True IF SIGNALED TO GO, False IF till WAS SIGNALED
    """
    waiter = Signal()

    if self.waiting:
        DEBUG and _Log.note("waiting with {{num}} others on {{name|quote}}", num=len(self.waiting), name=self.name, stack_depth=1)
        self.waiting.insert(0, waiter)
    else:
        DEBUG and _Log.note("waiting by self on {{name|quote}}", name=self.name)
        self.waiting = [waiter]

    try:
        self.lock.release()
        DEBUG and _Log.note("out of lock {{name|quote}}", name=self.name)
        (waiter | till).wait()
        if DEBUG:
            _Log.note("done minimum wait (for signal {{till|quote}})", till=till.name if till else "", name=self.name)
    except Exception as e:
        if not _Log:
            _late_import()
        _Log.warning("problem", cause=e)
    finally:
        self.lock.acquire()
        DEBUG and _Log.note("re-acquired lock {{name|quote}}", name=self.name)

    try:
        self.waiting.remove(waiter)
        DEBUG and _Log.note("removed own signal from {{name|quote}}", name=self.name)
    except Exception:
        pass

    return bool(waiter)
THE ASSUMPTION IS wait() WILL ALWAYS RETURN WITH THE LOCK ACQUIRED :param till: WHEN TO GIVE UP WAITING FOR ANOTHER THREAD TO SIGNAL :return: True IF SIGNALED TO GO, False IF till WAS SIGNALED
def nphase_border(im, include_diagonals=False):
    r'''
    Identifies the voxels in regions that border *N* other regions.

    Useful for finding triple-phase boundaries.

    Parameters
    ----------
    im : ND-array
        An ND image of the porous material containing discrete values in the
        pore space identifying different regions. e.g. the result of a
        snow-partition

    include_diagonals : boolean
        When identifying bordering pixels (2D) and voxels (3D) include those
        shifted along more than one axis

    Returns
    -------
    image : ND-array
        A copy of ``im`` with voxel values equal to the number of uniquely
        different bordering values
    '''
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    # Get dimension of image
    ndim = len(np.shape(im))
    if ndim not in [2, 3]:
        raise NotImplementedError("Function only works for 2d and 3d images")
    # Pad image to handle edges
    im = np.pad(im, pad_width=1, mode='edge')
    # Stack rolled images for each neighbor to be inspected
    stack = _make_stack(im, include_diagonals)
    # Sort the stack along the last axis
    stack.sort()
    out = np.ones_like(im)
    # Run through stack recording when neighbor id changes
    # Number of changes is number of unique bordering regions
    for k in range(np.shape(stack)[ndim])[1:]:
        if ndim == 2:
            mask = stack[:, :, k] != stack[:, :, k-1]
        elif ndim == 3:
            mask = stack[:, :, :, k] != stack[:, :, :, k-1]
        out += mask
    # Un-pad
    if ndim == 2:
        return out[1:-1, 1:-1].copy()
    else:
        return out[1:-1, 1:-1, 1:-1].copy()
r''' Identifies the voxels in regions that border *N* other regions. Useful for finding triple-phase boundaries. Parameters ---------- im : ND-array An ND image of the porous material containing discrete values in the pore space identifying different regions. e.g. the result of a snow-partition include_diagonals : boolean When identifying bordering pixels (2D) and voxels (3D) include those shifted along more than one axis Returns ------- image : ND-array A copy of ``im`` with voxel values equal to the number of uniquely different bordering values
def check_rollout(edits_service, package_name, days):
    """Check if package_name has a release on staged rollout for too long"""
    edit = edits_service.insert(body={}, packageName=package_name).execute()
    response = edits_service.tracks().get(editId=edit['id'], track='production',
                                          packageName=package_name).execute()
    releases = response['releases']
    for release in releases:
        if release['status'] == 'inProgress':
            url = 'https://archive.mozilla.org/pub/mobile/releases/{}/SHA512SUMS'.format(release['name'])
            resp = requests.head(url)
            if resp.status_code != 200:
                if resp.status_code != 404:  # 404 is expected for release candidates
                    logger.warning("Could not check %s: %s", url, resp.status_code)
                continue
            age = time.time() - calendar.timegm(eu.parsedate(resp.headers['Last-Modified']))
            if age >= days * DAY:
                yield release, age
Check if package_name has a release on staged rollout for too long
def default_number_converter(number_str):
    """
    Converts the string representation of a json number into its python object
    equivalent, an int, long, float or whatever type suits.
    """
    is_int = (number_str.startswith('-') and number_str[1:].isdigit()) or number_str.isdigit()
    # FIXME: this handles a wider range of numbers than allowed by the json standard,
    # e.g. float('nan') and float('inf'). But is this a problem?
    return int(number_str) if is_int else float(number_str)
Converts the string representation of a json number into its python object equivalent, an int, long, float or whatever type suits.
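A quick self-contained check of the conversion rule above:

print(default_number_converter('42'))       # 42 (int)
print(default_number_converter('-7'))       # -7 (int)
print(default_number_converter('3.14'))     # 3.14 (float)
print(default_number_converter('-2.5e3'))   # -2500.0 (float)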
def train(self, data, target, **kwargs):
    """
    Used in the training phase.  Override.
    """
    non_predictors = [i.replace(" ", "_").lower() for i in list(set(data['team']))] + ["team", "next_year_wins"]
    self.column_names = [l for l in list(data.columns) if l not in non_predictors]
    results, folds = self.cross_validate(data, non_predictors, **kwargs)
    self.gather_results(results, folds, data)
Used in the training phase. Override.
def add_aliases(self_or_cls, **kwargs):
    """
    Conveniently add new aliases as keyword arguments. For instance
    you can add a new alias with add_aliases(short='Longer string')
    """
    self_or_cls.aliases.update({v: k for k, v in kwargs.items()})
Conveniently add new aliases as keyword arguments. For instance you can add a new alias with add_aliases(short='Longer string')
def get_gebouw_by_id(self, id):
    '''
    Retrieve a `Gebouw` by the Id.

    :param integer id: the Id of the `Gebouw`
    :rtype: :class:`Gebouw`
    '''
    def creator():
        res = crab_gateway_request(
            self.client, 'GetGebouwByIdentificatorGebouw', id
        )
        if res == None:
            raise GatewayResourceNotFoundException()
        return Gebouw(
            res.IdentificatorGebouw,
            res.AardGebouw,
            res.StatusGebouw,
            res.GeometriemethodeGebouw,
            res.Geometrie,
            Metadata(
                res.BeginDatum,
                res.BeginTijd,
                self.get_bewerking(res.BeginBewerking),
                self.get_organisatie(res.BeginOrganisatie)
            )
        )
    if self.caches['short'].is_configured:
        key = 'GetGebouwByIdentificatorGebouw#%s' % (id)
        gebouw = self.caches['short'].get_or_create(key, creator)
    else:
        gebouw = creator()
    gebouw.set_gateway(self)
    return gebouw
Retrieve a `Gebouw` by the Id. :param integer id: the Id of the `Gebouw` :rtype: :class:`Gebouw`
def _update_limits_from_api(self):
    """
    Call the service's API action to retrieve limit/quota information, and
    update AwsLimit objects in ``self.limits`` with this information.
    """
    logger.debug('Setting CloudFormation limits from API')
    self.connect()
    resp = self.conn.describe_account_limits()
    for lim in resp['AccountLimits']:
        if lim['Name'] == 'StackLimit':
            self.limits['Stacks']._set_api_limit(lim['Value'])
            continue
        logger.debug('API response contained unknown CloudFormation '
                     'limit: %s', lim['Name'])
Call the service's API action to retrieve limit/quota information, and update AwsLimit objects in ``self.limits`` with this information.
async def load(cls, db, identifier=None, redis_key=None):
    """Load the object from redis. Use the identifier (colon-separated
    composite keys or the primary key) or the redis_key.
    """
    if not identifier and not redis_key:
        raise InvalidQuery('Must supply identifier or redis_key')
    if redis_key is None:
        redis_key = cls.make_key(identifier)
    if await db.exists(redis_key):
        data = await db.hgetall(redis_key)
        kwargs = {}
        for key_bin, value_bin in data.items():
            key, value = key_bin, value_bin
            column = getattr(cls, key, False)
            if not column or (column.field_type == str):
                kwargs[key] = value
            elif column.field_type == datetime:
                kwargs[key] = datetime.strptime(value, DATETIME_FORMAT)
            else:
                kwargs[key] = column.field_type(value)
        kwargs['loading'] = True
        return cls(**kwargs)
    else:
        logger.debug("No Redis key found: {}".format(redis_key))
        return None
Load the object from redis. Use the identifier (colon-separated composite keys or the primary key) or the redis_key.
def from_lal_unit(lunit):
    """Convert a `LALUnit` into a `~astropy.units.Unit`

    Parameters
    ----------
    lunit : `lal.Unit`
        the input unit

    Returns
    -------
    unit : `~astropy.units.Unit`
        the Astropy representation of the input

    Raises
    ------
    TypeError
        if ``lunit`` cannot be converted to `lal.Unit`
    ValueError
        if Astropy doesn't understand the base units for the input
    """
    return reduce(operator.mul, (
        units.Unit(str(LAL_UNIT_INDEX[i])) ** exp
        for i, exp in enumerate(lunit.unitNumerator)))
Convert a `LALUnit` into a `~astropy.units.Unit` Parameters ---------- lunit : `lal.Unit` the input unit Returns ------- unit : `~astropy.units.Unit` the Astropy representation of the input Raises ------ TypeError if ``lunit`` cannot be converted to `lal.Unit` ValueError if Astropy doesn't understand the base units for the input
def encode_binary_dict(array, buffers):
    ''' Send a numpy array as an unencoded binary buffer

    The encoded format is a dict with the following structure:

    .. code:: python

        {
            '__buffer__' : << an ID to locate the buffer >>,
            'shape'      : << array shape >>,
            'dtype'      : << dtype name >>,
            'order'      : << byte order at origin (little or big)>>
        }

    Args:
        array (np.ndarray) : an array to encode

        buffers (set) :
            Set to add buffers to

            **This is an "out" parameter**. The values it contains will be
            modified in-place.

    Returns:
        dict
    '''
    buffer_id = make_id()
    buf = (dict(id=buffer_id), array.tobytes())
    buffers.append(buf)

    return {
        '__buffer__' : buffer_id,
        'shape'      : array.shape,
        'dtype'      : array.dtype.name,
        'order'      : sys.byteorder
    }
Send a numpy array as an unencoded binary buffer The encoded format is a dict with the following structure: .. code:: python { '__buffer__' : << an ID to locate the buffer >>, 'shape' : << array shape >>, 'dtype' : << dtype name >>, 'order' : << byte order at origin (little or big)>> } Args: array (np.ndarray) : an array to encode buffers (set) : Set to add buffers to **This is an "out" parameter**. The values it contains will be modified in-place. Returns: dict
def get_assignments(self, site):
    """
    Gets a list of assignments associated with a site (class). Returns
    a list of TSquareAssignment objects.

    @param site (TSquareSite) - The site to use with the assignment query
    @returns - A list of TSquareAssignment objects. May be an empty list
               if the site has defined no assignments.
    """
    tools = self.get_tools(site)
    assignment_tool_filter = [x for x in tools if x.name == 'assignment-grades']
    if not assignment_tool_filter:
        return []
    assignment_tool_url = assignment_tool_filter[0].href
    response = self._session.get(assignment_tool_url)
    response.raise_for_status()
    iframes = self._html_iface.get_iframes(response.text)
    iframe_url = ''
    for frame in iframes:
        if frame['title'] == 'Assignments ':
            iframe_url = frame['src']
    if iframe_url == '':
        print("WARNING: NO ASSIGNMENT IFRAMES FOUND")
    response = self._session.get(iframe_url)
    response.raise_for_status()
    assignment_dict_list = self._html_iface.get_assignments(response.text)
    return [TSquareAssignment(**x) for x in assignment_dict_list]
Gets a list of assignments associated with a site (class). Returns a list of TSquareAssignment objects. @param site (TSquareSite) - The site to use with the assignment query @returns - A list of TSquareSite objects. May be an empty list if the site has defined no assignments.
Below is the the instruction that describes the task: ### Input: Gets a list of assignments associated with a site (class). Returns a list of TSquareAssignment objects. @param site (TSquareSite) - The site to use with the assignment query @returns - A list of TSquareSite objects. May be an empty list if the site has defined no assignments. ### Response: def get_assignments(self, site): """ Gets a list of assignments associated with a site (class). Returns a list of TSquareAssignment objects. @param site (TSquareSite) - The site to use with the assignment query @returns - A list of TSquareSite objects. May be an empty list if the site has defined no assignments. """ tools = self.get_tools(site) assignment_tool_filter = [x.href for x in tools if x.name == 'assignment-grades'] if not assignment_tool_filter: return [] assignment_tool_url = assignment_tool_filter[0].href response = self._session.get(assignment_tool_url) response.raise_for_status() iframes = self._html_iface.get_iframes(response.text) iframe_url = '' for frame in iframes: if frame['title'] == 'Assignments ': iframe_url = frame['src'] if iframe_url == '': print "WARNING: NO ASSIGNMENT IFRAMES FOUND" response = self._session.get(iframe_url) response.raise_for_status() assignment_dict_list = self._html_iface.get_assignments(response.text) return [TSquareAssignment(**x) for x in assignment_dict_list]
def get_name_dictionary_extractor(name_trie): """Method for creating default name dictionary extractor""" return DictionaryExtractor()\ .set_trie(name_trie)\ .set_pre_filter(VALID_TOKEN_RE.match)\ .set_pre_process(lambda x: x.lower())\ .set_metadata({'extractor': 'dig_name_dictionary_extractor'})
Method for creating default name dictionary extractor
Below is the the instruction that describes the task: ### Input: Method for creating default name dictionary extractor ### Response: def get_name_dictionary_extractor(name_trie): """Method for creating default name dictionary extractor""" return DictionaryExtractor()\ .set_trie(name_trie)\ .set_pre_filter(VALID_TOKEN_RE.match)\ .set_pre_process(lambda x: x.lower())\ .set_metadata({'extractor': 'dig_name_dictionary_extractor'})
def get_tunnel_statistics_output_tunnel_stat_rx_bytes(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_tunnel_statistics = ET.Element("get_tunnel_statistics") config = get_tunnel_statistics output = ET.SubElement(get_tunnel_statistics, "output") tunnel_stat = ET.SubElement(output, "tunnel-stat") rx_bytes = ET.SubElement(tunnel_stat, "rx-bytes") rx_bytes.text = kwargs.pop('rx_bytes') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def get_tunnel_statistics_output_tunnel_stat_rx_bytes(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_tunnel_statistics = ET.Element("get_tunnel_statistics") config = get_tunnel_statistics output = ET.SubElement(get_tunnel_statistics, "output") tunnel_stat = ET.SubElement(output, "tunnel-stat") rx_bytes = ET.SubElement(tunnel_stat, "rx-bytes") rx_bytes.text = kwargs.pop('rx_bytes') callback = kwargs.pop('callback', self._callback) return callback(config)
def to_record(cls, attr_names, values): """ Convert values to a record to be inserted into a database. :param list attr_names: List of attributes for the converting record. :param values: Values to be converted. :type values: |dict|/|namedtuple|/|list|/|tuple| :raises ValueError: If the ``values`` is invalid. """ try: # from a namedtuple to a dict values = values._asdict() except AttributeError: pass try: # from a dictionary to a list return [cls.__to_sqlite_element(values.get(attr_name)) for attr_name in attr_names] except AttributeError: pass if isinstance(values, (tuple, list)): return [cls.__to_sqlite_element(value) for value in values] raise ValueError("cannot convert from {} to list".format(type(values)))
Convert values to a record to be inserted into a database. :param list attr_names: List of attributes for the converting record. :param values: Values to be converted. :type values: |dict|/|namedtuple|/|list|/|tuple| :raises ValueError: If the ``values`` is invalid.
Below is the the instruction that describes the task: ### Input: Convert values to a record to be inserted into a database. :param list attr_names: List of attributes for the converting record. :param values: Values to be converted. :type values: |dict|/|namedtuple|/|list|/|tuple| :raises ValueError: If the ``values`` is invalid. ### Response: def to_record(cls, attr_names, values): """ Convert values to a record to be inserted into a database. :param list attr_names: List of attributes for the converting record. :param values: Values to be converted. :type values: |dict|/|namedtuple|/|list|/|tuple| :raises ValueError: If the ``values`` is invalid. """ try: # from a namedtuple to a dict values = values._asdict() except AttributeError: pass try: # from a dictionary to a list return [cls.__to_sqlite_element(values.get(attr_name)) for attr_name in attr_names] except AttributeError: pass if isinstance(values, (tuple, list)): return [cls.__to_sqlite_element(value) for value in values] raise ValueError("cannot convert from {} to list".format(type(values)))
def _get_pdm(cls, df, windows): """ +DM, positive directional moving If window is not 1, calculate the SMMA of +DM :param df: data :param windows: range :return: """ window = cls.get_only_one_positive_int(windows) column_name = 'pdm_{}'.format(window) um, dm = df['um'], df['dm'] df['pdm'] = np.where(um > dm, um, 0) if window > 1: pdm = df['pdm_{}_ema'.format(window)] else: pdm = df['pdm'] df[column_name] = pdm
+DM, positive directional moving If window is not 1, calculate the SMMA of +DM :param df: data :param windows: range :return:
Below is the the instruction that describes the task: ### Input: +DM, positive directional moving If window is not 1, calculate the SMMA of +DM :param df: data :param windows: range :return: ### Response: def _get_pdm(cls, df, windows): """ +DM, positive directional moving If window is not 1, calculate the SMMA of +DM :param df: data :param windows: range :return: """ window = cls.get_only_one_positive_int(windows) column_name = 'pdm_{}'.format(window) um, dm = df['um'], df['dm'] df['pdm'] = np.where(um > dm, um, 0) if window > 1: pdm = df['pdm_{}_ema'.format(window)] else: pdm = df['pdm'] df[column_name] = pdm
def do_termchar(self, args): """Get or set termination character for resource in use. <termchar> can be one of: CR, LF, CRLF, NUL or None. None is used to disable termination character Get termination character: termchar Set termination character read or read+write: termchar <termchar> [<termchar>] """ if not self.current: print('There are no resources in use. Use the command "open".') return args = args.strip() if not args: try: charmap = { u'\r': 'CR', u'\n': 'LF', u'\r\n': 'CRLF', u'\0': 'NUL' } chr = self.current.read_termination if chr in charmap: chr = charmap[chr] chw = self.current.write_termination if chw in charmap: chw = charmap[chw] print('Termchar read: {} write: {}'.format(chr, chw)) except Exception as e: print(e) else: args = args.split(' ') charmap = { 'CR': u'\r', 'LF': u'\n', 'CRLF': u'\r\n', 'NUL': u'\0', 'None': None } chr = args[0] chw = args[0 if len(args) == 1 else 1] if chr in charmap and chw in charmap: try: self.current.read_termination = charmap[chr] self.current.write_termination = charmap[chw] print('Done') except Exception as e: print(e) else: print('use CR, LF, CRLF, NUL or None to set termchar') return
Get or set termination character for resource in use. <termchar> can be one of: CR, LF, CRLF, NUL or None. None is used to disable termination character Get termination character: termchar Set termination character read or read+write: termchar <termchar> [<termchar>]
Below is the the instruction that describes the task: ### Input: Get or set termination character for resource in use. <termchar> can be one of: CR, LF, CRLF, NUL or None. None is used to disable termination character Get termination character: termchar Set termination character read or read+write: termchar <termchar> [<termchar>] ### Response: def do_termchar(self, args): """Get or set termination character for resource in use. <termchar> can be one of: CR, LF, CRLF, NUL or None. None is used to disable termination character Get termination character: termchar Set termination character read or read+write: termchar <termchar> [<termchar>] """ if not self.current: print('There are no resources in use. Use the command "open".') return args = args.strip() if not args: try: charmap = { u'\r': 'CR', u'\n': 'LF', u'\r\n': 'CRLF', u'\0': 'NUL' } chr = self.current.read_termination if chr in charmap: chr = charmap[chr] chw = self.current.write_termination if chw in charmap: chw = charmap[chw] print('Termchar read: {} write: {}'.format(chr, chw)) except Exception as e: print(e) else: args = args.split(' ') charmap = { 'CR': u'\r', 'LF': u'\n', 'CRLF': u'\r\n', 'NUL': u'\0', 'None': None } chr = args[0] chw = args[0 if len(args) == 1 else 1] if chr in charmap and chw in charmap: try: self.current.read_termination = charmap[chr] self.current.write_termination = charmap[chw] print('Done') except Exception as e: print(e) else: print('use CR, LF, CRLF, NUL or None to set termchar') return
def draw_image(self, ax, image): """Process a matplotlib image object and call renderer.draw_image""" self.renderer.draw_image(imdata=utils.image_to_base64(image), extent=image.get_extent(), coordinates="data", style={"alpha": image.get_alpha(), "zorder": image.get_zorder()}, mplobj=image)
Process a matplotlib image object and call renderer.draw_image
Below is the the instruction that describes the task: ### Input: Process a matplotlib image object and call renderer.draw_image ### Response: def draw_image(self, ax, image): """Process a matplotlib image object and call renderer.draw_image""" self.renderer.draw_image(imdata=utils.image_to_base64(image), extent=image.get_extent(), coordinates="data", style={"alpha": image.get_alpha(), "zorder": image.get_zorder()}, mplobj=image)
def add_index(self, field, value): """ add_index(field, value) Tag this object with the specified field/value pair for indexing. :param field: The index field. :type field: string :param value: The index value. :type value: string or integer :rtype: :class:`RiakObject <riak.riak_object.RiakObject>` """ if field[-4:] not in ("_bin", "_int"): raise RiakError("Riak 2i fields must end with either '_bin'" " or '_int'.") self.indexes.add((field, value)) return self._robject
add_index(field, value) Tag this object with the specified field/value pair for indexing. :param field: The index field. :type field: string :param value: The index value. :type value: string or integer :rtype: :class:`RiakObject <riak.riak_object.RiakObject>`
Below is the the instruction that describes the task: ### Input: add_index(field, value) Tag this object with the specified field/value pair for indexing. :param field: The index field. :type field: string :param value: The index value. :type value: string or integer :rtype: :class:`RiakObject <riak.riak_object.RiakObject>` ### Response: def add_index(self, field, value): """ add_index(field, value) Tag this object with the specified field/value pair for indexing. :param field: The index field. :type field: string :param value: The index value. :type value: string or integer :rtype: :class:`RiakObject <riak.riak_object.RiakObject>` """ if field[-4:] not in ("_bin", "_int"): raise RiakError("Riak 2i fields must end with either '_bin'" " or '_int'.") self.indexes.add((field, value)) return self._robject
def tree_walk(cls, directory, tree): """Walks a tree returned by `cls.list_to_tree` returning a list of 3-tuples as if from os.walk().""" results = [] dirs = [d for d in tree if d != FILE_MARKER] files = tree[FILE_MARKER] results.append((directory, dirs, files)) for d in dirs: subdir = os.path.join(directory, d) subtree = tree[d] results.extend(cls.tree_walk(subdir, subtree)) return results
Walks a tree returned by `cls.list_to_tree` returning a list of 3-tuples as if from os.walk().
Below is the the instruction that describes the task: ### Input: Walks a tree returned by `cls.list_to_tree` returning a list of 3-tuples as if from os.walk(). ### Response: def tree_walk(cls, directory, tree): """Walks a tree returned by `cls.list_to_tree` returning a list of 3-tuples as if from os.walk().""" results = [] dirs = [d for d in tree if d != FILE_MARKER] files = tree[FILE_MARKER] results.append((directory, dirs, files)) for d in dirs: subdir = os.path.join(directory, d) subtree = tree[d] results.extend(cls.tree_walk(subdir, subtree)) return results
def xcorr(x, y=None, maxlags=None, norm='biased'): """Cross-correlation using numpy.correlate Estimates the cross-correlation (and autocorrelation) sequence of a random process of length N. By default, there is no normalisation and the output sequence of the cross-correlation has a length 2*N+1. :param array x: first data array of length N :param array y: second data array of length N. If not specified, computes the autocorrelation. :param int maxlags: compute cross correlation between [-maxlags:maxlags] when maxlags is not specified, the range of lags is [-N+1:N-1]. :param str option: normalisation in ['biased', 'unbiased', None, 'coeff'] The true cross-correlation sequence is .. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m]) However, in practice, only a finite segment of one realization of the infinite-length random process is available. The correlation is estimated using numpy.correlate(x,y,'full'). Normalisation is handled by this function using the following cases: * 'biased': Biased estimate of the cross-correlation function * 'unbiased': Unbiased estimate of the cross-correlation function * 'coeff': Normalizes the sequence so the autocorrelations at zero lag is 1.0. :return: * a numpy.array containing the cross-correlation sequence (length 2*N-1) * lags vector .. note:: If x and y are not the same length, the shorter vector is zero-padded to the length of the longer vector. .. rubric:: Examples .. doctest:: >>> from spectrum import xcorr >>> x = [1,2,3,4,5] >>> c, l = xcorr(x,x, maxlags=0, norm='biased') >>> c array([ 11.]) .. seealso:: :func:`CORRELATION`. """ N = len(x) if y is None: y = x assert len(x) == len(y), 'x and y must have the same length. Add zeros if needed' if maxlags is None: maxlags = N-1 lags = np.arange(0, 2*N-1) else: assert maxlags <= N, 'maxlags must be less than data length' lags = np.arange(N-maxlags-1, N+maxlags) res = np.correlate(x, y, mode='full') if norm == 'biased': Nf = float(N) res = res[lags] / float(N) # do not use /= !! elif norm == 'unbiased': res = res[lags] / (float(N)-abs(np.arange(-N+1, N)))[lags] elif norm == 'coeff': Nf = float(N) rms = pylab_rms_flat(x) * pylab_rms_flat(y) res = res[lags] / rms / Nf else: res = res[lags] lags = np.arange(-maxlags, maxlags+1) return res, lags
Cross-correlation using numpy.correlate Estimates the cross-correlation (and autocorrelation) sequence of a random process of length N. By default, there is no normalisation and the output sequence of the cross-correlation has a length 2*N+1. :param array x: first data array of length N :param array y: second data array of length N. If not specified, computes the autocorrelation. :param int maxlags: compute cross correlation between [-maxlags:maxlags] when maxlags is not specified, the range of lags is [-N+1:N-1]. :param str option: normalisation in ['biased', 'unbiased', None, 'coeff'] The true cross-correlation sequence is .. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m]) However, in practice, only a finite segment of one realization of the infinite-length random process is available. The correlation is estimated using numpy.correlate(x,y,'full'). Normalisation is handled by this function using the following cases: * 'biased': Biased estimate of the cross-correlation function * 'unbiased': Unbiased estimate of the cross-correlation function * 'coeff': Normalizes the sequence so the autocorrelations at zero lag is 1.0. :return: * a numpy.array containing the cross-correlation sequence (length 2*N-1) * lags vector .. note:: If x and y are not the same length, the shorter vector is zero-padded to the length of the longer vector. .. rubric:: Examples .. doctest:: >>> from spectrum import xcorr >>> x = [1,2,3,4,5] >>> c, l = xcorr(x,x, maxlags=0, norm='biased') >>> c array([ 11.]) .. seealso:: :func:`CORRELATION`.
Below is the the instruction that describes the task: ### Input: Cross-correlation using numpy.correlate Estimates the cross-correlation (and autocorrelation) sequence of a random process of length N. By default, there is no normalisation and the output sequence of the cross-correlation has a length 2*N+1. :param array x: first data array of length N :param array y: second data array of length N. If not specified, computes the autocorrelation. :param int maxlags: compute cross correlation between [-maxlags:maxlags] when maxlags is not specified, the range of lags is [-N+1:N-1]. :param str option: normalisation in ['biased', 'unbiased', None, 'coeff'] The true cross-correlation sequence is .. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m]) However, in practice, only a finite segment of one realization of the infinite-length random process is available. The correlation is estimated using numpy.correlate(x,y,'full'). Normalisation is handled by this function using the following cases: * 'biased': Biased estimate of the cross-correlation function * 'unbiased': Unbiased estimate of the cross-correlation function * 'coeff': Normalizes the sequence so the autocorrelations at zero lag is 1.0. :return: * a numpy.array containing the cross-correlation sequence (length 2*N-1) * lags vector .. note:: If x and y are not the same length, the shorter vector is zero-padded to the length of the longer vector. .. rubric:: Examples .. doctest:: >>> from spectrum import xcorr >>> x = [1,2,3,4,5] >>> c, l = xcorr(x,x, maxlags=0, norm='biased') >>> c array([ 11.]) .. seealso:: :func:`CORRELATION`. ### Response: def xcorr(x, y=None, maxlags=None, norm='biased'): """Cross-correlation using numpy.correlate Estimates the cross-correlation (and autocorrelation) sequence of a random process of length N. By default, there is no normalisation and the output sequence of the cross-correlation has a length 2*N+1. :param array x: first data array of length N :param array y: second data array of length N. If not specified, computes the autocorrelation. :param int maxlags: compute cross correlation between [-maxlags:maxlags] when maxlags is not specified, the range of lags is [-N+1:N-1]. :param str option: normalisation in ['biased', 'unbiased', None, 'coeff'] The true cross-correlation sequence is .. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m]) However, in practice, only a finite segment of one realization of the infinite-length random process is available. The correlation is estimated using numpy.correlate(x,y,'full'). Normalisation is handled by this function using the following cases: * 'biased': Biased estimate of the cross-correlation function * 'unbiased': Unbiased estimate of the cross-correlation function * 'coeff': Normalizes the sequence so the autocorrelations at zero lag is 1.0. :return: * a numpy.array containing the cross-correlation sequence (length 2*N-1) * lags vector .. note:: If x and y are not the same length, the shorter vector is zero-padded to the length of the longer vector. .. rubric:: Examples .. doctest:: >>> from spectrum import xcorr >>> x = [1,2,3,4,5] >>> c, l = xcorr(x,x, maxlags=0, norm='biased') >>> c array([ 11.]) .. seealso:: :func:`CORRELATION`. """ N = len(x) if y is None: y = x assert len(x) == len(y), 'x and y must have the same length. Add zeros if needed' if maxlags is None: maxlags = N-1 lags = np.arange(0, 2*N-1) else: assert maxlags <= N, 'maxlags must be less than data length' lags = np.arange(N-maxlags-1, N+maxlags) res = np.correlate(x, y, mode='full') if norm == 'biased': Nf = float(N) res = res[lags] / float(N) # do not use /= !! elif norm == 'unbiased': res = res[lags] / (float(N)-abs(np.arange(-N+1, N)))[lags] elif norm == 'coeff': Nf = float(N) rms = pylab_rms_flat(x) * pylab_rms_flat(y) res = res[lags] / rms / Nf else: res = res[lags] lags = np.arange(-maxlags, maxlags+1) return res, lags
def state(self): """Which state the session is in. Starting - all messages needed to get stream started. Playing - keep-alive messages every self.session_timeout. """ if self.method in ['OPTIONS', 'DESCRIBE', 'SETUP', 'PLAY']: state = STATE_STARTING elif self.method in ['KEEP-ALIVE']: state = STATE_PLAYING else: state = STATE_STOPPED _LOGGER.debug('RTSP session (%s) state %s', self.host, state) return state
Which state the session is in. Starting - all messages needed to get stream started. Playing - keep-alive messages every self.session_timeout.
Below is the the instruction that describes the task: ### Input: Which state the session is in. Starting - all messages needed to get stream started. Playing - keep-alive messages every self.session_timeout. ### Response: def state(self): """Which state the session is in. Starting - all messages needed to get stream started. Playing - keep-alive messages every self.session_timeout. """ if self.method in ['OPTIONS', 'DESCRIBE', 'SETUP', 'PLAY']: state = STATE_STARTING elif self.method in ['KEEP-ALIVE']: state = STATE_PLAYING else: state = STATE_STOPPED _LOGGER.debug('RTSP session (%s) state %s', self.host, state) return state
def provider_parser(subparser): """Configure a provider parser for Hetzner""" subparser.add_argument('--auth-account', help='specify type of Hetzner account: by default Hetzner Robot ' '(robot) or Hetzner konsoleH (konsoleh)') subparser.add_argument('--auth-username', help='specify username of Hetzner account') subparser.add_argument('--auth-password', help='specify password of Hetzner account') subparser.add_argument('--linked', help='if exists, uses linked CNAME as A|AAAA|TXT record name for edit ' 'actions: by default (yes); Further restriction: Only enabled if ' 'record name or raw FQDN record identifier \'type/name/content\' is ' 'specified, and additionally for update actions the record name ' 'remains the same', default=str('yes'), choices=['yes', 'no']) subparser.add_argument('--propagated', help='waits until record is publicly propagated after succeeded ' 'create|update actions: by default (yes)', default=str('yes'), choices=['yes', 'no']) subparser.add_argument('--latency', help='specify latency, used during checks for publicly propagation ' 'and additionally for Hetzner Robot after record edits: by default ' '30s (30)', default=int(30), type=int)
Configure a provider parser for Hetzner
Below is the the instruction that describes the task: ### Input: Configure a provider parser for Hetzner ### Response: def provider_parser(subparser): """Configure a provider parser for Hetzner""" subparser.add_argument('--auth-account', help='specify type of Hetzner account: by default Hetzner Robot ' '(robot) or Hetzner konsoleH (konsoleh)') subparser.add_argument('--auth-username', help='specify username of Hetzner account') subparser.add_argument('--auth-password', help='specify password of Hetzner account') subparser.add_argument('--linked', help='if exists, uses linked CNAME as A|AAAA|TXT record name for edit ' 'actions: by default (yes); Further restriction: Only enabled if ' 'record name or raw FQDN record identifier \'type/name/content\' is ' 'specified, and additionally for update actions the record name ' 'remains the same', default=str('yes'), choices=['yes', 'no']) subparser.add_argument('--propagated', help='waits until record is publicly propagated after succeeded ' 'create|update actions: by default (yes)', default=str('yes'), choices=['yes', 'no']) subparser.add_argument('--latency', help='specify latency, used during checks for publicly propagation ' 'and additionally for Hetzner Robot after record edits: by default ' '30s (30)', default=int(30), type=int)
def get_content(self): """Open content as a stream for reading. See DAVResource.get_content() """ filestream = compat.StringIO() tableName, primKey = self.provider._split_path(self.path) if primKey is not None: conn = self.provider._init_connection() listFields = self.provider._get_field_list(conn, tableName) csvwriter = csv.DictWriter(filestream, listFields, extrasaction="ignore") dictFields = {} for field_name in listFields: dictFields[field_name] = field_name csvwriter.writerow(dictFields) if primKey == "_ENTIRE_CONTENTS": cursor = conn.cursor(MySQLdb.cursors.DictCursor) cursor.execute("SELECT * from " + self.provider._db + "." + tableName) result_set = cursor.fetchall() for row in result_set: csvwriter.writerow(row) cursor.close() else: row = self.provider._get_record_by_primary_key(conn, tableName, primKey) if row is not None: csvwriter.writerow(row) conn.close() # this suffices for small dbs, but # for a production big database, I imagine you would have a FileMixin that # does the retrieving and population even as the file object is being read filestream.seek(0) return filestream
Open content as a stream for reading. See DAVResource.get_content()
Below is the the instruction that describes the task: ### Input: Open content as a stream for reading. See DAVResource.get_content() ### Response: def get_content(self): """Open content as a stream for reading. See DAVResource.get_content() """ filestream = compat.StringIO() tableName, primKey = self.provider._split_path(self.path) if primKey is not None: conn = self.provider._init_connection() listFields = self.provider._get_field_list(conn, tableName) csvwriter = csv.DictWriter(filestream, listFields, extrasaction="ignore") dictFields = {} for field_name in listFields: dictFields[field_name] = field_name csvwriter.writerow(dictFields) if primKey == "_ENTIRE_CONTENTS": cursor = conn.cursor(MySQLdb.cursors.DictCursor) cursor.execute("SELECT * from " + self.provider._db + "." + tableName) result_set = cursor.fetchall() for row in result_set: csvwriter.writerow(row) cursor.close() else: row = self.provider._get_record_by_primary_key(conn, tableName, primKey) if row is not None: csvwriter.writerow(row) conn.close() # this suffices for small dbs, but # for a production big database, I imagine you would have a FileMixin that # does the retrieving and population even as the file object is being read filestream.seek(0) return filestream
def ec2_network_network_acl_id(self, lookup, default=None): """ Args: lookup: the friendly name of the network ACL we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the network ACL, or None if no match found """ network_acl_id = EFAwsResolver.__CLIENTS["ec2"].describe_network_acls(Filters=[{ 'Name': 'tag:Name', 'Values': [lookup] }]) if len(network_acl_id["NetworkAcls"]) > 0: return network_acl_id["NetworkAcls"][0]["NetworkAclId"] else: return default
Args: lookup: the friendly name of the network ACL we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the network ACL, or None if no match found
Below is the the instruction that describes the task: ### Input: Args: lookup: the friendly name of the network ACL we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the network ACL, or None if no match found ### Response: def ec2_network_network_acl_id(self, lookup, default=None): """ Args: lookup: the friendly name of the network ACL we are looking up default: the optional value to return if lookup failed; returns None if not set Returns: the ID of the network ACL, or None if no match found """ network_acl_id = EFAwsResolver.__CLIENTS["ec2"].describe_network_acls(Filters=[{ 'Name': 'tag:Name', 'Values': [lookup] }]) if len(network_acl_id["NetworkAcls"]) > 0: return network_acl_id["NetworkAcls"][0]["NetworkAclId"] else: return default
def end(self): """Shutdown the curses window.""" if hasattr(curses, 'echo'): curses.echo() if hasattr(curses, 'nocbreak'): curses.nocbreak() if hasattr(curses, 'curs_set'): try: curses.curs_set(1) except Exception: pass curses.endwin()
Shutdown the curses window.
Below is the the instruction that describes the task: ### Input: Shutdown the curses window. ### Response: def end(self): """Shutdown the curses window.""" if hasattr(curses, 'echo'): curses.echo() if hasattr(curses, 'nocbreak'): curses.nocbreak() if hasattr(curses, 'curs_set'): try: curses.curs_set(1) except Exception: pass curses.endwin()
def get_summary(list_all=[], **kwargs): ''' summarize the report data @param list_all: a list which save the report data @param kwargs: such as show_all: True/False report show all status cases proj_name: project name home_page: home page url ''' all_summary = [] for module in list_all: summary = { "module_name" : module['Name'], "show_all" : kwargs.get("show_all",True), "project_name" : kwargs.get("proj_name","TestProject"), "home_page" : kwargs.get("home_page",__about__.HOME_PAGE), "start_time" : "", "end_time" : "", "duration_seconds" : "", "total_case_num" : len(module["TestCases"]), "pass_cases_num" : 0, "fail_cases_num" : 0, "details" : [] } for case in module["TestCases"]: case_detail = {} case_detail["linkurl"] = "./caselogs/%s_%s.log" %(case["case_name"],case["exec_date"]) if case["status"].lower() == "pass": summary["pass_cases_num"] += 1 case_detail["c_style"] = "tr_pass" else: summary["fail_cases_num"] += 1 case_detail["c_style"] = "tr_fail" case_detail.update(case) summary["details"].append(case_detail) try: st = module["TestCases"][0].get("start_at") et = module["TestCases"][-1].get("end_at") summary["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(st)) summary["end_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(et)) summary["duration_seconds"] = float("%.2f" %(et - st)) except Exception as _: logger.log_warning("Will set 'start_at' and 'end_at' to 'None'") (summary["start_time"], summary["end_time"], summary["duration_seconds"]) = (None,None,None) if summary["fail_cases_num"] > 0: summary["dict_report"] = {"result":0,"message":"failure","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]} else: summary["dict_report"] = {"result":1,"message":"success","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]} all_summary.append(summary) return all_summary
summarize the report data @param list_all: a list which save the report data @param kwargs: such as show_all: True/False report show all status cases proj_name: project name home_page: home page url
Below is the the instruction that describes the task: ### Input: summarize the report data @param list_all: a list which save the report data @param kwargs: such as show_all: True/False report show all status cases proj_name: project name home_page: home page url ### Response: def get_summary(list_all=[], **kwargs): ''' summarize the report data @param list_all: a list which save the report data @param kwargs: such as show_all: True/False report show all status cases proj_name: project name home_page: home page url ''' all_summary = [] for module in list_all: summary = { "module_name" : module['Name'], "show_all" : kwargs.get("show_all",True), "project_name" : kwargs.get("proj_name","TestProject"), "home_page" : kwargs.get("home_page",__about__.HOME_PAGE), "start_time" : "", "end_time" : "", "duration_seconds" : "", "total_case_num" : len(module["TestCases"]), "pass_cases_num" : 0, "fail_cases_num" : 0, "details" : [] } for case in module["TestCases"]: case_detail = {} case_detail["linkurl"] = "./caselogs/%s_%s.log" %(case["case_name"],case["exec_date"]) if case["status"].lower() == "pass": summary["pass_cases_num"] += 1 case_detail["c_style"] = "tr_pass" else: summary["fail_cases_num"] += 1 case_detail["c_style"] = "tr_fail" case_detail.update(case) summary["details"].append(case_detail) try: st = module["TestCases"][0].get("start_at") et = module["TestCases"][-1].get("end_at") summary["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(st)) summary["end_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(et)) summary["duration_seconds"] = float("%.2f" %(et - st)) except Exception as _: logger.log_warning("Will set 'start_at' and 'end_at' to 'None'") (summary["start_time"], summary["end_time"], summary["duration_seconds"]) = (None,None,None) if summary["fail_cases_num"] > 0: summary["dict_report"] = {"result":0,"message":"failure","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]} else: summary["dict_report"] = {"result":1,"message":"success","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]} all_summary.append(summary) return all_summary
def determine_type(filename): '''Determine the file type and return it.''' ftype = magic.from_file(filename, mime=True).decode('utf8') if ftype == 'text/plain': ftype = 'text' elif ftype == 'image/svg+xml': ftype = 'svg' else: ftype = ftype.split('/')[1] return ftype
Determine the file type and return it.
Below is the the instruction that describes the task: ### Input: Determine the file type and return it. ### Response: def determine_type(filename): '''Determine the file type and return it.''' ftype = magic.from_file(filename, mime=True).decode('utf8') if ftype == 'text/plain': ftype = 'text' elif ftype == 'image/svg+xml': ftype = 'svg' else: ftype = ftype.split('/')[1] return ftype
def get_posts(self, include_draft=False, filter_functions=None): """ Get all posts from filesystem. :param include_draft: return draft posts or not :param filter_functions: filter to apply BEFORE result being sorted :return: an iterable of Post objects (the first is the latest post) """ def posts_generator(path): """Loads valid posts one by one in the given path.""" if os.path.isdir(path): for file in os.listdir(path): filename, ext = os.path.splitext(file) format_name = get_standard_format_name(ext[1:]) if format_name is not None and re.match( r'\d{4}-\d{2}-\d{2}-.+', filename): # the format is supported and the filename is valid, # so load this post post = Post() post.format = format_name post.meta, post.raw_content = FileStorage.read_file( os.path.join(path, file)) post.rel_url = filename.replace('-', '/', 3) + '/' post.unique_key = '/post/' + post.rel_url yield post posts_path = os.path.join(current_app.instance_path, 'posts') result = filter(lambda p: include_draft or not p.is_draft, posts_generator(posts_path)) result = self._filter_result(result, filter_functions) return sorted(result, key=lambda p: p.created, reverse=True)
Get all posts from filesystem. :param include_draft: return draft posts or not :param filter_functions: filter to apply BEFORE result being sorted :return: an iterable of Post objects (the first is the latest post)
Below is the the instruction that describes the task: ### Input: Get all posts from filesystem. :param include_draft: return draft posts or not :param filter_functions: filter to apply BEFORE result being sorted :return: an iterable of Post objects (the first is the latest post) ### Response: def get_posts(self, include_draft=False, filter_functions=None): """ Get all posts from filesystem. :param include_draft: return draft posts or not :param filter_functions: filter to apply BEFORE result being sorted :return: an iterable of Post objects (the first is the latest post) """ def posts_generator(path): """Loads valid posts one by one in the given path.""" if os.path.isdir(path): for file in os.listdir(path): filename, ext = os.path.splitext(file) format_name = get_standard_format_name(ext[1:]) if format_name is not None and re.match( r'\d{4}-\d{2}-\d{2}-.+', filename): # the format is supported and the filename is valid, # so load this post post = Post() post.format = format_name post.meta, post.raw_content = FileStorage.read_file( os.path.join(path, file)) post.rel_url = filename.replace('-', '/', 3) + '/' post.unique_key = '/post/' + post.rel_url yield post posts_path = os.path.join(current_app.instance_path, 'posts') result = filter(lambda p: include_draft or not p.is_draft, posts_generator(posts_path)) result = self._filter_result(result, filter_functions) return sorted(result, key=lambda p: p.created, reverse=True)
def finalize(self): """ Get the base64-encoded signature itself. Can only be called once. """ signature = self.signer.finalize() sig_r, sig_s = decode_dss_signature(signature) sig_b64 = encode_signature(sig_r, sig_s) return sig_b64
Get the base64-encoded signature itself. Can only be called once.
Below is the the instruction that describes the task: ### Input: Get the base64-encoded signature itself. Can only be called once. ### Response: def finalize(self): """ Get the base64-encoded signature itself. Can only be called once. """ signature = self.signer.finalize() sig_r, sig_s = decode_dss_signature(signature) sig_b64 = encode_signature(sig_r, sig_s) return sig_b64
def emit(self, record): """Prints a record out to some streams. If FLAGS.logtostderr is set, it will print to sys.stderr ONLY. If FLAGS.alsologtostderr is set, it will print to sys.stderr. If FLAGS.logtostderr is not set, it will log to the stream associated with the current thread. Args: record: logging.LogRecord, the record to emit. """ # People occasionally call logging functions at import time before # our flags may have even been defined yet, let alone even parsed, as we # rely on the C++ side to define some flags for us and app init to # deal with parsing. Match the C++ library behavior of notify and emit # such messages to stderr. It encourages people to clean-up and does # not hide the message. level = record.levelno if not FLAGS.is_parsed(): # Also implies "before flag has been defined". global _warn_preinit_stderr if _warn_preinit_stderr: sys.stderr.write( 'WARNING: Logging before flag parsing goes to stderr.\n') _warn_preinit_stderr = False self._log_to_stderr(record) elif FLAGS['logtostderr'].value: self._log_to_stderr(record) else: super(PythonHandler, self).emit(record) stderr_threshold = converter.string_to_standard( FLAGS['stderrthreshold'].value) if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and self.stream != sys.stderr): self._log_to_stderr(record) # Die when the record is created from ABSLLogger and level is FATAL. if _is_absl_fatal_record(record): self.flush() # Flush the log before dying. # In threaded python, sys.exit() from a non-main thread only # exits the thread in question. os.abort()
Prints a record out to some streams. If FLAGS.logtostderr is set, it will print to sys.stderr ONLY. If FLAGS.alsologtostderr is set, it will print to sys.stderr. If FLAGS.logtostderr is not set, it will log to the stream associated with the current thread. Args: record: logging.LogRecord, the record to emit.
Below is the the instruction that describes the task: ### Input: Prints a record out to some streams. If FLAGS.logtostderr is set, it will print to sys.stderr ONLY. If FLAGS.alsologtostderr is set, it will print to sys.stderr. If FLAGS.logtostderr is not set, it will log to the stream associated with the current thread. Args: record: logging.LogRecord, the record to emit. ### Response: def emit(self, record): """Prints a record out to some streams. If FLAGS.logtostderr is set, it will print to sys.stderr ONLY. If FLAGS.alsologtostderr is set, it will print to sys.stderr. If FLAGS.logtostderr is not set, it will log to the stream associated with the current thread. Args: record: logging.LogRecord, the record to emit. """ # People occasionally call logging functions at import time before # our flags may have even been defined yet, let alone even parsed, as we # rely on the C++ side to define some flags for us and app init to # deal with parsing. Match the C++ library behavior of notify and emit # such messages to stderr. It encourages people to clean-up and does # not hide the message. level = record.levelno if not FLAGS.is_parsed(): # Also implies "before flag has been defined". global _warn_preinit_stderr if _warn_preinit_stderr: sys.stderr.write( 'WARNING: Logging before flag parsing goes to stderr.\n') _warn_preinit_stderr = False self._log_to_stderr(record) elif FLAGS['logtostderr'].value: self._log_to_stderr(record) else: super(PythonHandler, self).emit(record) stderr_threshold = converter.string_to_standard( FLAGS['stderrthreshold'].value) if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and self.stream != sys.stderr): self._log_to_stderr(record) # Die when the record is created from ABSLLogger and level is FATAL. if _is_absl_fatal_record(record): self.flush() # Flush the log before dying. # In threaded python, sys.exit() from a non-main thread only # exits the thread in question. os.abort()
def title(self, gender: Optional[Gender] = None, title_type: Optional[TitleType] = None) -> str: """Generate a random title for name. You can generate random prefix or suffix for name using this method. :param gender: The gender. :param title_type: TitleType enum object. :return: The title. :raises NonEnumerableError: if gender or title_type in incorrect format. :Example: PhD. """ gender_key = self._validate_enum(gender, Gender) title_key = self._validate_enum(title_type, TitleType) titles = self._data['title'][gender_key][title_key] return self.random.choice(titles)
Generate a random title for name. You can generate random prefix or suffix for name using this method. :param gender: The gender. :param title_type: TitleType enum object. :return: The title. :raises NonEnumerableError: if gender or title_type in incorrect format. :Example: PhD.
Below is the the instruction that describes the task: ### Input: Generate a random title for name. You can generate random prefix or suffix for name using this method. :param gender: The gender. :param title_type: TitleType enum object. :return: The title. :raises NonEnumerableError: if gender or title_type in incorrect format. :Example: PhD. ### Response: def title(self, gender: Optional[Gender] = None, title_type: Optional[TitleType] = None) -> str: """Generate a random title for name. You can generate random prefix or suffix for name using this method. :param gender: The gender. :param title_type: TitleType enum object. :return: The title. :raises NonEnumerableError: if gender or title_type in incorrect format. :Example: PhD. """ gender_key = self._validate_enum(gender, Gender) title_key = self._validate_enum(title_type, TitleType) titles = self._data['title'][gender_key][title_key] return self.random.choice(titles)
def forward(self, input, target): """ NB: It's for debug only, please use optimizer.optimize() in production. Takes an input object, and computes the corresponding loss of the criterion, compared with `target` :param input: ndarray or list of ndarray :param target: ndarray or list of ndarray :return: value of loss """ jinput, input_is_table = Layer.check_input(input) jtarget, target_is_table = Layer.check_input(target) output = callBigDlFunc(self.bigdl_type, "criterionForward", self.value, jinput, input_is_table, jtarget, target_is_table) return output
NB: It's for debug only, please use optimizer.optimize() in production. Takes an input object, and computes the corresponding loss of the criterion, compared with `target` :param input: ndarray or list of ndarray :param target: ndarray or list of ndarray :return: value of loss
Below is the the instruction that describes the task: ### Input: NB: It's for debug only, please use optimizer.optimize() in production. Takes an input object, and computes the corresponding loss of the criterion, compared with `target` :param input: ndarray or list of ndarray :param target: ndarray or list of ndarray :return: value of loss ### Response: def forward(self, input, target): """ NB: It's for debug only, please use optimizer.optimize() in production. Takes an input object, and computes the corresponding loss of the criterion, compared with `target` :param input: ndarray or list of ndarray :param target: ndarray or list of ndarray :return: value of loss """ jinput, input_is_table = Layer.check_input(input) jtarget, target_is_table = Layer.check_input(target) output = callBigDlFunc(self.bigdl_type, "criterionForward", self.value, jinput, input_is_table, jtarget, target_is_table) return output
def _wait_for_read_ready_or_timeout(self, timeout): """Returns tuple of whether stdin is ready to read and an event. If an event is returned, that event is more pressing than reading bytes on stdin to create a keyboard input event. If stdin is ready, either there are bytes to read or a SIGTSTP triggered by dsusp has been received""" remaining_timeout = timeout t0 = time.time() while True: try: (rs, _, _) = select.select( [self.in_stream.fileno()] + self.readers, [], [], remaining_timeout) if not rs: return False, None r = rs[0] # if there's more than one, get it in the next loop if r == self.in_stream.fileno(): return True, None else: os.read(r, 1024) if self.queued_interrupting_events: return False, self.queued_interrupting_events.pop(0) elif remaining_timeout is not None: remaining_timeout = max(0, t0 + timeout - time.time()) continue else: continue except select.error: if self.sigints: return False, self.sigints.pop() if remaining_timeout is not None: remaining_timeout = max(timeout - (time.time() - t0), 0)
Returns tuple of whether stdin is ready to read and an event. If an event is returned, that event is more pressing than reading bytes on stdin to create a keyboard input event. If stdin is ready, either there are bytes to read or a SIGTSTP triggered by dsusp has been received
Below is the the instruction that describes the task: ### Input: Returns tuple of whether stdin is ready to read and an event. If an event is returned, that event is more pressing than reading bytes on stdin to create a keyboard input event. If stdin is ready, either there are bytes to read or a SIGTSTP triggered by dsusp has been received ### Response: def _wait_for_read_ready_or_timeout(self, timeout): """Returns tuple of whether stdin is ready to read and an event. If an event is returned, that event is more pressing than reading bytes on stdin to create a keyboard input event. If stdin is ready, either there are bytes to read or a SIGTSTP triggered by dsusp has been received""" remaining_timeout = timeout t0 = time.time() while True: try: (rs, _, _) = select.select( [self.in_stream.fileno()] + self.readers, [], [], remaining_timeout) if not rs: return False, None r = rs[0] # if there's more than one, get it in the next loop if r == self.in_stream.fileno(): return True, None else: os.read(r, 1024) if self.queued_interrupting_events: return False, self.queued_interrupting_events.pop(0) elif remaining_timeout is not None: remaining_timeout = max(0, t0 + timeout - time.time()) continue else: continue except select.error: if self.sigints: return False, self.sigints.pop() if remaining_timeout is not None: remaining_timeout = max(timeout - (time.time() - t0), 0)
def has_property(self, property_name): """ Check if schema has property :param property_name: str, name to check :return: bool """ if property_name in self.properties: return True elif property_name in self.entities: return True elif property_name in self.collections: return True else: return False
Check if schema has property :param property_name: str, name to check :return: bool
Below is the the instruction that describes the task: ### Input: Check if schema has property :param property_name: str, name to check :return: bool ### Response: def has_property(self, property_name): """ Check if schema has property :param property_name: str, name to check :return: bool """ if property_name in self.properties: return True elif property_name in self.entities: return True elif property_name in self.collections: return True else: return False
def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info, **kwargs): """Update the BB models and the estimates""" stratum_idx = extra_info['stratum'] self._BB_TP.update(ell*ell_hat, stratum_idx) self._BB_PP.update(ell_hat, stratum_idx) self._BB_P.update(ell, stratum_idx) # Update model covariance matrix for stratum_idx self._update_cov_model(strata_to_update = [stratum_idx]) # Update F-measure estimate, estimator variance, exp. variance decrease self._update_estimates()
Update the BB models and the estimates
Below is the the instruction that describes the task: ### Input: Update the BB models and the estimates ### Response: def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info, **kwargs): """Update the BB models and the estimates""" stratum_idx = extra_info['stratum'] self._BB_TP.update(ell*ell_hat, stratum_idx) self._BB_PP.update(ell_hat, stratum_idx) self._BB_P.update(ell, stratum_idx) # Update model covariance matrix for stratum_idx self._update_cov_model(strata_to_update = [stratum_idx]) # Update F-measure estimate, estimator variance, exp. variance decrease self._update_estimates()
def loadJSON(self, filename): """Adds the data from a JSON file. The file is expected to be in datapoint format:: d = DatapointArray().loadJSON("myfile.json") """ with open(filename, "r") as f: self.merge(json.load(f)) return self
Adds the data from a JSON file. The file is expected to be in datapoint format:: d = DatapointArray().loadJSON("myfile.json")
Below is the the instruction that describes the task: ### Input: Adds the data from a JSON file. The file is expected to be in datapoint format:: d = DatapointArray().loadJSON("myfile.json") ### Response: def loadJSON(self, filename): """Adds the data from a JSON file. The file is expected to be in datapoint format:: d = DatapointArray().loadJSON("myfile.json") """ with open(filename, "r") as f: self.merge(json.load(f)) return self
def get(id_, hwid, type_, unit, precision, as_json): """Get temperature of a specific sensor""" if id_ and (hwid or type_): raise click.BadOptionUsage( "If --id is given --hwid and --type are not allowed." ) if id_: try: sensor = W1ThermSensor.get_available_sensors()[id_ - 1] except IndexError: raise click.BadOptionUsage( "No sensor with id {0} available. " "Use the ls command to show all available sensors.".format(id_) ) else: sensor = W1ThermSensor(type_, hwid) if precision: sensor.set_precision(precision, persist=False) temperature = sensor.get_temperature(unit) if as_json: data = { "hwid": sensor.id, "type": sensor.type_name, "temperature": temperature, "unit": unit, } click.echo(json.dumps(data, indent=4, sort_keys=True)) else: click.echo( "Sensor {0} measured temperature: {1} {2}".format( click.style(sensor.id, bold=True), click.style(str(temperature), bold=True), click.style(unit, bold=True), ) )
Get temperature of a specific sensor
Below is the the instruction that describes the task: ### Input: Get temperature of a specific sensor ### Response: def get(id_, hwid, type_, unit, precision, as_json): """Get temperature of a specific sensor""" if id_ and (hwid or type_): raise click.BadOptionUsage( "If --id is given --hwid and --type are not allowed." ) if id_: try: sensor = W1ThermSensor.get_available_sensors()[id_ - 1] except IndexError: raise click.BadOptionUsage( "No sensor with id {0} available. " "Use the ls command to show all available sensors.".format(id_) ) else: sensor = W1ThermSensor(type_, hwid) if precision: sensor.set_precision(precision, persist=False) temperature = sensor.get_temperature(unit) if as_json: data = { "hwid": sensor.id, "type": sensor.type_name, "temperature": temperature, "unit": unit, } click.echo(json.dumps(data, indent=4, sort_keys=True)) else: click.echo( "Sensor {0} measured temperature: {1} {2}".format( click.style(sensor.id, bold=True), click.style(str(temperature), bold=True), click.style(unit, bold=True), ) )
def gep(self, ptr, indices, inbounds=False, name=''): """ Compute effective address (getelementptr): name = getelementptr ptr, <indices...> """ instr = instructions.GEPInstr(self.block, ptr, indices, inbounds=inbounds, name=name) self._insert(instr) return instr
Compute effective address (getelementptr): name = getelementptr ptr, <indices...>
Below is the the instruction that describes the task: ### Input: Compute effective address (getelementptr): name = getelementptr ptr, <indices...> ### Response: def gep(self, ptr, indices, inbounds=False, name=''): """ Compute effective address (getelementptr): name = getelementptr ptr, <indices...> """ instr = instructions.GEPInstr(self.block, ptr, indices, inbounds=inbounds, name=name) self._insert(instr) return instr
def cds_column_replace(source, data): """ Determine if the CDS.data requires a full replacement or simply needs to be updated. A replacement is required if untouched columns are not the same length as the columns being updated. """ current_length = [len(v) for v in source.data.values() if isinstance(v, (list, np.ndarray))] new_length = [len(v) for v in data.values() if isinstance(v, (list, np.ndarray))] untouched = [k for k in source.data if k not in data] return bool(untouched and current_length and new_length and current_length[0] != new_length[0])
Determine if the CDS.data requires a full replacement or simply needs to be updated. A replacement is required if untouched columns are not the same length as the columns being updated.
Below is the the instruction that describes the task: ### Input: Determine if the CDS.data requires a full replacement or simply needs to be updated. A replacement is required if untouched columns are not the same length as the columns being updated. ### Response: def cds_column_replace(source, data): """ Determine if the CDS.data requires a full replacement or simply needs to be updated. A replacement is required if untouched columns are not the same length as the columns being updated. """ current_length = [len(v) for v in source.data.values() if isinstance(v, (list, np.ndarray))] new_length = [len(v) for v in data.values() if isinstance(v, (list, np.ndarray))] untouched = [k for k in source.data if k not in data] return bool(untouched and current_length and new_length and current_length[0] != new_length[0])
def dsort(fname, order, has_header=True, frow=0, ofname=None): r""" Sort file data. :param fname: Name of the comma-separated values file to sort :type fname: FileNameExists_ :param order: Sort order :type order: :ref:`CsvColFilter` :param has_header: Flag that indicates whether the comma-separated values file to sort has column headers in its first line (True) or not (False) :type has_header: boolean :param frow: First data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the sorted data. If None the sorting is done "in place" :type ofname: FileName_ or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.dsort.dsort :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`frow\` is not valid) * RuntimeError (Argument \`has_header\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] """ ofname = fname if ofname is None else ofname obj = CsvFile(fname=fname, has_header=has_header, frow=frow) obj.dsort(order) obj.write(fname=ofname, header=has_header, append=False)
r""" Sort file data. :param fname: Name of the comma-separated values file to sort :type fname: FileNameExists_ :param order: Sort order :type order: :ref:`CsvColFilter` :param has_header: Flag that indicates whether the comma-separated values file to sort has column headers in its first line (True) or not (False) :type has_header: boolean :param frow: First data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the sorted data. If None the sorting is done "in place" :type ofname: FileName_ or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.dsort.dsort :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`frow\` is not valid) * RuntimeError (Argument \`has_header\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]]
Below is the the instruction that describes the task: ### Input: r""" Sort file data. :param fname: Name of the comma-separated values file to sort :type fname: FileNameExists_ :param order: Sort order :type order: :ref:`CsvColFilter` :param has_header: Flag that indicates whether the comma-separated values file to sort has column headers in its first line (True) or not (False) :type has_header: boolean :param frow: First data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the sorted data. If None the sorting is done "in place" :type ofname: FileName_ or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.dsort.dsort :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`frow\` is not valid) * RuntimeError (Argument \`has_header\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] ### Response: def dsort(fname, order, has_header=True, frow=0, ofname=None): r""" Sort file data. :param fname: Name of the comma-separated values file to sort :type fname: FileNameExists_ :param order: Sort order :type order: :ref:`CsvColFilter` :param has_header: Flag that indicates whether the comma-separated values file to sort has column headers in its first line (True) or not (False) :type has_header: boolean :param frow: First data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the sorted data. If None the sorting is done "in place" :type ofname: FileName_ or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.dsort.dsort :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`frow\` is not valid) * RuntimeError (Argument \`has_header\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] """ ofname = fname if ofname is None else ofname obj = CsvFile(fname=fname, has_header=has_header, frow=frow) obj.dsort(order) obj.write(fname=ofname, header=has_header, append=False)
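Editor's usage note: a minimal sketch of calling the dsort helper above. It assumes the function is importable from pcsv.dsort (the module named in its autodoc block), that the throw-away file and column names ("data.csv", "Ctrl", "Ref", "Result") are purely hypothetical, and that a plain list of column names is an acceptable :ref:`CsvColFilter` value; the real CsvColFilter syntax may allow richer specifications (e.g. sort direction).

import csv
from pcsv.dsort import dsort  # assumed import path, per the autodoc reference above

# Build a small throw-away CSV file to sort (hypothetical data).
rows = [["Ctrl", "Ref", "Result"], [2, 7, 30], [1, 9, 10], [3, 1, 20]]
with open("data.csv", "w", newline="") as fobj:
    csv.writer(fobj).writerows(rows)

# Sort by the "Ctrl" column and write the result to a separate file
# instead of sorting in place.
dsort(fname="data.csv", order=["Ctrl"], has_header=True, ofname="sorted.csv")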
def mark_entries(self, entries):
    '''
    Mark one entry as the main entry and the rest as resource entries.

    The main entry is the one that contains the response body of the
    requested URL.
    '''
    for entry in entries:
        self._set_entry_type(entry, RESOURCE_ENTRY)

    # If the first entry doesn't have a redirect, set it as the main entry
    main_entry = entries[0]
    main_location = self._get_location(main_entry)
    if not main_location:
        self._set_entry_type(main_entry, MAIN_ENTRY)
        return

    # Resolve the redirected URL and see if it's in the rest of the entries
    main_url = urllib.parse.urljoin(get_url(main_entry), main_location)
    for entry in entries[1:]:
        url = get_url(entry)
        if url == main_url:
            self._set_entry_type(entry, MAIN_ENTRY)
            break
    else:
        # Fallback: no entry matched the redirect, keep the first entry as main
        self._set_entry_type(main_entry, MAIN_ENTRY)
Mark one entry as the main entry and the rest as resource entries.
The main entry is the one that contains the response body of the
requested URL.
Below is the the instruction that describes the task:
### Input:
Mark one entry as the main entry and the rest as resource entries.
The main entry is the one that contains the response body of the
requested URL.
### Response:
def mark_entries(self, entries):
    '''
    Mark one entry as the main entry and the rest as resource entries.

    The main entry is the one that contains the response body of the
    requested URL.
    '''
    for entry in entries:
        self._set_entry_type(entry, RESOURCE_ENTRY)

    # If the first entry doesn't have a redirect, set it as the main entry
    main_entry = entries[0]
    main_location = self._get_location(main_entry)
    if not main_location:
        self._set_entry_type(main_entry, MAIN_ENTRY)
        return

    # Resolve the redirected URL and see if it's in the rest of the entries
    main_url = urllib.parse.urljoin(get_url(main_entry), main_location)
    for entry in entries[1:]:
        url = get_url(entry)
        if url == main_url:
            self._set_entry_type(entry, MAIN_ENTRY)
            break
    else:
        # Fallback: no entry matched the redirect, keep the first entry as main
        self._set_entry_type(main_entry, MAIN_ENTRY)
def build_genome_alignment_from_file(ga_path, ref_spec, idx_path=None, verbose=False): """ build a genome alignment by loading from a single MAF file. :param ga_path: the path to the file to load. :param ref_spec: which species in the MAF file is the reference? :param idx_path: if provided, use this index to generate a just-in-time genome alignment, instead of loading the file immediately. """ blocks = [] if (idx_path is not None): bound_iter = functools.partial(genome_alignment_iterator, reference_species=ref_spec) hash_func = JustInTimeGenomeAlignmentBlock.build_hash factory = IndexedFile(None, bound_iter, hash_func) factory.read_index(idx_path, ga_path, verbose=verbose) pind = None for k in factory: if verbose: if pind is None: total = len(factory) pind = ProgressIndicator(totalToDo=total, messagePrefix="completed", messageSuffix="building alignment blocks ") pind.done += 1 pind.showProgress() blocks.append(JustInTimeGenomeAlignmentBlock(factory, k)) else: for b in genome_alignment_iterator(ga_path, ref_spec, verbose=verbose): blocks.append(b) return GenomeAlignment(blocks, verbose)
build a genome alignment by loading from a single MAF file. :param ga_path: the path to the file to load. :param ref_spec: which species in the MAF file is the reference? :param idx_path: if provided, use this index to generate a just-in-time genome alignment, instead of loading the file immediately.
Below is the the instruction that describes the task: ### Input: build a genome alignment by loading from a single MAF file. :param ga_path: the path to the file to load. :param ref_spec: which species in the MAF file is the reference? :param idx_path: if provided, use this index to generate a just-in-time genome alignment, instead of loading the file immediately. ### Response: def build_genome_alignment_from_file(ga_path, ref_spec, idx_path=None, verbose=False): """ build a genome alignment by loading from a single MAF file. :param ga_path: the path to the file to load. :param ref_spec: which species in the MAF file is the reference? :param idx_path: if provided, use this index to generate a just-in-time genome alignment, instead of loading the file immediately. """ blocks = [] if (idx_path is not None): bound_iter = functools.partial(genome_alignment_iterator, reference_species=ref_spec) hash_func = JustInTimeGenomeAlignmentBlock.build_hash factory = IndexedFile(None, bound_iter, hash_func) factory.read_index(idx_path, ga_path, verbose=verbose) pind = None for k in factory: if verbose: if pind is None: total = len(factory) pind = ProgressIndicator(totalToDo=total, messagePrefix="completed", messageSuffix="building alignment blocks ") pind.done += 1 pind.showProgress() blocks.append(JustInTimeGenomeAlignmentBlock(factory, k)) else: for b in genome_alignment_iterator(ga_path, ref_spec, verbose=verbose): blocks.append(b) return GenomeAlignment(blocks, verbose)
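Editor's usage note: a short sketch of the two loading modes of build_genome_alignment_from_file. The MAF path, index path, and reference species name ("hg19") are hypothetical; only the signature shown above is assumed.

# Eager load: every alignment block of the MAF file is parsed up front.
ga = build_genome_alignment_from_file("alignments/chr22.maf", "hg19",
                                      verbose=True)

# Just-in-time load: blocks are materialised on demand via a pre-built index.
ga_jit = build_genome_alignment_from_file("alignments/chr22.maf", "hg19",
                                          idx_path="alignments/chr22.maf.idx")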
async def _send_report(self, status): """ Call all subscribed coroutines in _notify whenever a status update occurs. This method is a coroutine """ if len(self._notify) > 0: # Each client gets its own copy of the dict. asyncio.gather(*[coro(dict(status)) for coro in self._notify], loop=self.loop)
Call all subscribed coroutines in _notify whenever a status update occurs. This method is a coroutine
Below is the the instruction that describes the task: ### Input: Call all subscribed coroutines in _notify whenever a status update occurs. This method is a coroutine ### Response: async def _send_report(self, status): """ Call all subscribed coroutines in _notify whenever a status update occurs. This method is a coroutine """ if len(self._notify) > 0: # Each client gets its own copy of the dict. asyncio.gather(*[coro(dict(status)) for coro in self._notify], loop=self.loop)
def reviews(self, last_item, filter_=None): """Get the reviews starting from last_item.""" cmd = self._get_gerrit_cmd(last_item, filter_) logger.debug("Getting reviews with command: %s", cmd) raw_data = self.__execute(cmd) raw_data = str(raw_data, "UTF-8") return raw_data
Get the reviews starting from last_item.
Below is the the instruction that describes the task: ### Input: Get the reviews starting from last_item. ### Response: def reviews(self, last_item, filter_=None): """Get the reviews starting from last_item.""" cmd = self._get_gerrit_cmd(last_item, filter_) logger.debug("Getting reviews with command: %s", cmd) raw_data = self.__execute(cmd) raw_data = str(raw_data, "UTF-8") return raw_data
def run(args): """ Args: args (argparse.Namespace) """ with warnings.catch_warnings(): warnings.simplefilter('ignore') query = prepareQuery(args.query_file.read()) ds = Dataset() res_indices_prev = set() # de-duplication res_indices = set() # create sub graphs for f in args.graphs: g = Graph(identifier=os.path.basename(f.name)) g.parse(data=f.read(), format='n3') ds.add_graph(g) # create and query data graph for data in read_by_chunk(args.input_file, int(args.chunk_size)): g = Graph(identifier='data') g.parse(data=data, format=args.input_type) ds.add_graph(g) res = ds.query(query) dedup_res_graph = Graph() if len(res) != 0: for r in res: tid = generate_index(r) res_indices.add(tid) if tid in res_indices_prev: # duplicated continue dedup_res_graph.add(r) if len(dedup_res_graph) > 0: ret = dedup_res_graph.serialize(format=args.output_type, encoding='utf-8') args.output_file.write(ret) ds.remove_graph(g) res_indices_prev = res_indices res_indices = set()
Args: args (argparse.Namespace)
Below is the the instruction that describes the task: ### Input: Args: args (argparse.Namespace) ### Response: def run(args): """ Args: args (argparse.Namespace) """ with warnings.catch_warnings(): warnings.simplefilter('ignore') query = prepareQuery(args.query_file.read()) ds = Dataset() res_indices_prev = set() # de-duplication res_indices = set() # create sub graphs for f in args.graphs: g = Graph(identifier=os.path.basename(f.name)) g.parse(data=f.read(), format='n3') ds.add_graph(g) # create and query data graph for data in read_by_chunk(args.input_file, int(args.chunk_size)): g = Graph(identifier='data') g.parse(data=data, format=args.input_type) ds.add_graph(g) res = ds.query(query) dedup_res_graph = Graph() if len(res) != 0: for r in res: tid = generate_index(r) res_indices.add(tid) if tid in res_indices_prev: # duplicated continue dedup_res_graph.add(r) if len(dedup_res_graph) > 0: ret = dedup_res_graph.serialize(format=args.output_type, encoding='utf-8') args.output_file.write(ret) ds.remove_graph(g) res_indices_prev = res_indices res_indices = set()
def _parse_tensor(self, indices=False): '''Parse a tensor.''' if indices: self.line = self._skip_lines(1) tensor = np.zeros((3, 3)) for i in range(3): tokens = self.line.split() if indices: tensor[i][0] = float(tokens[1]) tensor[i][1] = float(tokens[2]) tensor[i][2] = float(tokens[3]) else: tensor[i][0] = float(tokens[0]) tensor[i][1] = float(tokens[1]) tensor[i][2] = float(tokens[2]) self.line = self._skip_lines(1) return tensor
Parse a tensor.
Below is the the instruction that describes the task: ### Input: Parse a tensor. ### Response: def _parse_tensor(self, indices=False): '''Parse a tensor.''' if indices: self.line = self._skip_lines(1) tensor = np.zeros((3, 3)) for i in range(3): tokens = self.line.split() if indices: tensor[i][0] = float(tokens[1]) tensor[i][1] = float(tokens[2]) tensor[i][2] = float(tokens[3]) else: tensor[i][0] = float(tokens[0]) tensor[i][1] = float(tokens[1]) tensor[i][2] = float(tokens[2]) self.line = self._skip_lines(1) return tensor
def save(df, path, data_paths): """ Args: df (DataFlow): the DataFlow to serialize. path (str): output hdf5 file. data_paths (list[str]): list of h5 paths. It should have the same length as each datapoint, and each path should correspond to one component of the datapoint. """ size = _reset_df_and_get_size(df) buffer = defaultdict(list) with get_tqdm(total=size) as pbar: for dp in df: assert len(dp) == len(data_paths), "Datapoint has {} components!".format(len(dp)) for k, el in zip(data_paths, dp): buffer[k].append(el) pbar.update() with h5py.File(path, 'w') as hf, get_tqdm(total=len(data_paths)) as pbar: for data_path in data_paths: hf.create_dataset(data_path, data=buffer[data_path]) pbar.update()
Args: df (DataFlow): the DataFlow to serialize. path (str): output hdf5 file. data_paths (list[str]): list of h5 paths. It should have the same length as each datapoint, and each path should correspond to one component of the datapoint.
Below is the the instruction that describes the task: ### Input: Args: df (DataFlow): the DataFlow to serialize. path (str): output hdf5 file. data_paths (list[str]): list of h5 paths. It should have the same length as each datapoint, and each path should correspond to one component of the datapoint. ### Response: def save(df, path, data_paths): """ Args: df (DataFlow): the DataFlow to serialize. path (str): output hdf5 file. data_paths (list[str]): list of h5 paths. It should have the same length as each datapoint, and each path should correspond to one component of the datapoint. """ size = _reset_df_and_get_size(df) buffer = defaultdict(list) with get_tqdm(total=size) as pbar: for dp in df: assert len(dp) == len(data_paths), "Datapoint has {} components!".format(len(dp)) for k, el in zip(data_paths, dp): buffer[k].append(el) pbar.update() with h5py.File(path, 'w') as hf, get_tqdm(total=len(data_paths)) as pbar: for data_path in data_paths: hf.create_dataset(data_path, data=buffer[data_path]) pbar.update()
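Editor's usage note: a sketch of serializing a tiny in-memory DataFlow with the save helper above and reading the result back with plain h5py. DataFromList is assumed to be available from tensorpack.dataflow; any finite DataFlow whose datapoints are (image, label) pairs would work the same way.

import numpy as np
import h5py
from tensorpack.dataflow import DataFromList  # assumed import path

# A tiny in-memory DataFlow: each datapoint is an (image, label) pair.
points = [(np.zeros((8, 8), dtype="uint8"), i) for i in range(4)]
df = DataFromList(points)

# One h5 path per datapoint component, in the same order as the components.
save(df, "dataset.h5", ["images", "labels"])

# Read the serialized components back with h5py.
with h5py.File("dataset.h5", "r") as hf:
    print(hf["images"].shape)   # (4, 8, 8)
    print(hf["labels"][:])      # [0 1 2 3]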
def set_project_filenames(self, recent_files): """Set the list of open file names in a project""" if (self.current_active_project and self.is_valid_project( self.current_active_project.root_path)): self.current_active_project.set_recent_files(recent_files)
Set the list of open file names in a project
Below is the the instruction that describes the task: ### Input: Set the list of open file names in a project ### Response: def set_project_filenames(self, recent_files): """Set the list of open file names in a project""" if (self.current_active_project and self.is_valid_project( self.current_active_project.root_path)): self.current_active_project.set_recent_files(recent_files)
def revoke(self): """ * flag certificate as revoked * fill in revoked_at DateTimeField """ now = timezone.now() self.revoked = True self.revoked_at = now self.save()
* flag certificate as revoked * fill in revoked_at DateTimeField
Below is the the instruction that describes the task: ### Input: * flag certificate as revoked * fill in revoked_at DateTimeField ### Response: def revoke(self): """ * flag certificate as revoked * fill in revoked_at DateTimeField """ now = timezone.now() self.revoked = True self.revoked_at = now self.save()
def calcu0(self,E,Lz): """ NAME: calcu0 PURPOSE: calculate the minimum of the u potential INPUT: E - energy Lz - angular momentum OUTPUT: u0 HISTORY: 2012-11-29 - Written - Bovy (IAS) """ logu0= optimize.brent(_u0Eq, args=(self._delta,self._pot, E,Lz**2./2.)) return numpy.exp(logu0)
NAME: calcu0 PURPOSE: calculate the minimum of the u potential INPUT: E - energy Lz - angular momentum OUTPUT: u0 HISTORY: 2012-11-29 - Written - Bovy (IAS)
Below is the the instruction that describes the task: ### Input: NAME: calcu0 PURPOSE: calculate the minimum of the u potential INPUT: E - energy Lz - angular momentum OUTPUT: u0 HISTORY: 2012-11-29 - Written - Bovy (IAS) ### Response: def calcu0(self,E,Lz): """ NAME: calcu0 PURPOSE: calculate the minimum of the u potential INPUT: E - energy Lz - angular momentum OUTPUT: u0 HISTORY: 2012-11-29 - Written - Bovy (IAS) """ logu0= optimize.brent(_u0Eq, args=(self._delta,self._pot, E,Lz**2./2.)) return numpy.exp(logu0)
def open_state_machine(path=None, recent_opened_notification=False): """ Open a state machine from respective file system path :param str path: file system path to the state machine :param bool recent_opened_notification: flags that indicates that this call also should update recently open :rtype rafcon.core.state_machine.StateMachine :return: opened state machine """ start_time = time.time() if path is None: if interface.open_folder_func is None: logger.error("No function defined for opening a folder") return load_path = interface.open_folder_func("Please choose the folder of the state machine") if load_path is None: return else: load_path = path if state_machine_manager.is_state_machine_open(load_path): logger.info("State machine already open. Select state machine instance from path {0}.".format(load_path)) sm = state_machine_manager.get_open_state_machine_of_file_system_path(load_path) gui_helper_state.gui_singletons.state_machine_manager_model.selected_state_machine_id = sm.state_machine_id return state_machine_manager.get_open_state_machine_of_file_system_path(load_path) state_machine = None try: state_machine = storage.load_state_machine_from_path(load_path) state_machine_manager.add_state_machine(state_machine) if recent_opened_notification: global_runtime_config.update_recently_opened_state_machines_with(state_machine) duration = time.time() - start_time stat = state_machine.root_state.get_states_statistics(0) logger.info("It took {0:.2}s to load {1} states with {2} hierarchy levels.".format(duration, stat[0], stat[1])) except (AttributeError, ValueError, IOError) as e: logger.error('Error while trying to open state machine: {0}'.format(e)) return state_machine
Open a state machine from respective file system path :param str path: file system path to the state machine :param bool recent_opened_notification: flags that indicates that this call also should update recently open :rtype rafcon.core.state_machine.StateMachine :return: opened state machine
Below is the the instruction that describes the task: ### Input: Open a state machine from respective file system path :param str path: file system path to the state machine :param bool recent_opened_notification: flags that indicates that this call also should update recently open :rtype rafcon.core.state_machine.StateMachine :return: opened state machine ### Response: def open_state_machine(path=None, recent_opened_notification=False): """ Open a state machine from respective file system path :param str path: file system path to the state machine :param bool recent_opened_notification: flags that indicates that this call also should update recently open :rtype rafcon.core.state_machine.StateMachine :return: opened state machine """ start_time = time.time() if path is None: if interface.open_folder_func is None: logger.error("No function defined for opening a folder") return load_path = interface.open_folder_func("Please choose the folder of the state machine") if load_path is None: return else: load_path = path if state_machine_manager.is_state_machine_open(load_path): logger.info("State machine already open. Select state machine instance from path {0}.".format(load_path)) sm = state_machine_manager.get_open_state_machine_of_file_system_path(load_path) gui_helper_state.gui_singletons.state_machine_manager_model.selected_state_machine_id = sm.state_machine_id return state_machine_manager.get_open_state_machine_of_file_system_path(load_path) state_machine = None try: state_machine = storage.load_state_machine_from_path(load_path) state_machine_manager.add_state_machine(state_machine) if recent_opened_notification: global_runtime_config.update_recently_opened_state_machines_with(state_machine) duration = time.time() - start_time stat = state_machine.root_state.get_states_statistics(0) logger.info("It took {0:.2}s to load {1} states with {2} hierarchy levels.".format(duration, stat[0], stat[1])) except (AttributeError, ValueError, IOError) as e: logger.error('Error while trying to open state machine: {0}'.format(e)) return state_machine
def render_js_code(self, id_, *args, **kwargs): """Render html container for Select2 widget with options.""" if id_: options = self.render_select2_options_code( dict(self.get_options()), id_) return mark_safe(self.html.format(id=id_, options=options)) return u''
Render html container for Select2 widget with options.
Below is the the instruction that describes the task: ### Input: Render html container for Select2 widget with options. ### Response: def render_js_code(self, id_, *args, **kwargs): """Render html container for Select2 widget with options.""" if id_: options = self.render_select2_options_code( dict(self.get_options()), id_) return mark_safe(self.html.format(id=id_, options=options)) return u''
def _ensure_counter(self): """Ensure the sync counter is a valid non-dummy object.""" if not isinstance(self.sync_counter, self._SynchronizationManager): self.sync_counter = self._SynchronizationManager()
Ensure the sync counter is a valid non-dummy object.
Below is the the instruction that describes the task: ### Input: Ensure the sync counter is a valid non-dummy object. ### Response: def _ensure_counter(self): """Ensure the sync counter is a valid non-dummy object.""" if not isinstance(self.sync_counter, self._SynchronizationManager): self.sync_counter = self._SynchronizationManager()
def throttle(self, key, amount=1, rate=None, capacity=None, exc_class=Throttled, **kwargs): """Consume an amount for a given key, or raise a Throttled exception.""" if not self.consume(key, amount, rate, capacity, **kwargs): raise exc_class("Request of %d unit for %s exceeds capacity." % (amount, key))
Consume an amount for a given key, or raise a Throttled exception.
Below is the the instruction that describes the task: ### Input: Consume an amount for a given key, or raise a Throttled exception. ### Response: def throttle(self, key, amount=1, rate=None, capacity=None, exc_class=Throttled, **kwargs): """Consume an amount for a given key, or raise a Throttled exception.""" if not self.consume(key, amount, rate, capacity, **kwargs): raise exc_class("Request of %d unit for %s exceeds capacity." % (amount, key))
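Editor's usage note: a hedged sketch of the raise-on-overflow pattern built on throttle/consume. `limiter` stands in for an instance of the class that defines the method above (configured elsewhere), and the key/rate/capacity values are illustrative only; Throttled is the exception named in the method's signature.

def handle_request(user_id):
    # limiter is assumed to be an instance of the containing throttling class.
    try:
        limiter.throttle("user:%s" % user_id, amount=1, rate=10, capacity=20)
    except Throttled as exc:
        # Over capacity: reject (or delay) the request.
        return "429 Too Many Requests: %s" % exc
    return "200 OK"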
def dump_tables_to_tskit(pop): """ Converts fwdpy11.TableCollection to an tskit.TreeSequence """ node_view = np.array(pop.tables.nodes, copy=True) node_view['time'] -= node_view['time'].max() node_view['time'][np.where(node_view['time'] != 0.0)[0]] *= -1.0 edge_view = np.array(pop.tables.edges, copy=False) mut_view = np.array(pop.tables.mutations, copy=False) tc = tskit.TableCollection(pop.tables.genome_length) # We must initialize population and individual # tables before we can do anything else. # Attempting to set population to anything # other than -1 in an tskit.NodeTable will # raise an exception if the PopulationTable # isn't set up. _initializePopulationTable(node_view, tc) node_to_individual = _initializeIndividualTable(pop, tc) individual = [-1 for i in range(len(node_view))] for k, v in node_to_individual.items(): individual[k] = v flags = [1]*2*pop.N + [0]*(len(node_view) - 2*pop.N) # Bug fixed in 0.3.1: add preserved nodes to samples list for i in pop.tables.preserved_nodes: flags[i] = 1 tc.nodes.set_columns(flags=flags, time=node_view['time'], population=node_view['population'], individual=individual) tc.edges.set_columns(left=edge_view['left'], right=edge_view['right'], parent=edge_view['parent'], child=edge_view['child']) mpos = np.array([pop.mutations[i].pos for i in mut_view['key']]) ancestral_state = np.zeros(len(mut_view), dtype=np.int8)+ord('0') ancestral_state_offset = np.arange(len(mut_view)+1, dtype=np.uint32) tc.sites.set_columns(position=mpos, ancestral_state=ancestral_state, ancestral_state_offset=ancestral_state_offset) derived_state = np.zeros(len(mut_view), dtype=np.int8)+ord('1') md, mdo = _generate_mutation_metadata(pop) tc.mutations.set_columns(site=np.arange(len(mpos), dtype=np.int32), node=mut_view['node'], derived_state=derived_state, derived_state_offset=ancestral_state_offset, metadata=md, metadata_offset=mdo) return tc.tree_sequence()
Converts fwdpy11.TableCollection to an tskit.TreeSequence
Below is the the instruction that describes the task: ### Input: Converts fwdpy11.TableCollection to an tskit.TreeSequence ### Response: def dump_tables_to_tskit(pop): """ Converts fwdpy11.TableCollection to an tskit.TreeSequence """ node_view = np.array(pop.tables.nodes, copy=True) node_view['time'] -= node_view['time'].max() node_view['time'][np.where(node_view['time'] != 0.0)[0]] *= -1.0 edge_view = np.array(pop.tables.edges, copy=False) mut_view = np.array(pop.tables.mutations, copy=False) tc = tskit.TableCollection(pop.tables.genome_length) # We must initialize population and individual # tables before we can do anything else. # Attempting to set population to anything # other than -1 in an tskit.NodeTable will # raise an exception if the PopulationTable # isn't set up. _initializePopulationTable(node_view, tc) node_to_individual = _initializeIndividualTable(pop, tc) individual = [-1 for i in range(len(node_view))] for k, v in node_to_individual.items(): individual[k] = v flags = [1]*2*pop.N + [0]*(len(node_view) - 2*pop.N) # Bug fixed in 0.3.1: add preserved nodes to samples list for i in pop.tables.preserved_nodes: flags[i] = 1 tc.nodes.set_columns(flags=flags, time=node_view['time'], population=node_view['population'], individual=individual) tc.edges.set_columns(left=edge_view['left'], right=edge_view['right'], parent=edge_view['parent'], child=edge_view['child']) mpos = np.array([pop.mutations[i].pos for i in mut_view['key']]) ancestral_state = np.zeros(len(mut_view), dtype=np.int8)+ord('0') ancestral_state_offset = np.arange(len(mut_view)+1, dtype=np.uint32) tc.sites.set_columns(position=mpos, ancestral_state=ancestral_state, ancestral_state_offset=ancestral_state_offset) derived_state = np.zeros(len(mut_view), dtype=np.int8)+ord('1') md, mdo = _generate_mutation_metadata(pop) tc.mutations.set_columns(site=np.arange(len(mpos), dtype=np.int32), node=mut_view['node'], derived_state=derived_state, derived_state_offset=ancestral_state_offset, metadata=md, metadata_offset=mdo) return tc.tree_sequence()
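Editor's usage note: a sketch of what a caller might do with the returned tree sequence, assuming `pop` is a fwdpy11 population that already carries tables (e.g. after a simulation with tree-sequence recording). Only widely used tskit accessors are shown.

# pop is assumed to be an evolved fwdpy11 population object.
ts = dump_tables_to_tskit(pop)

# Basic inspection through the tskit API.
print("trees:", ts.num_trees)
print("sampled nodes:", ts.num_samples)
for variant in ts.variants():
    # Each variant exposes the site position and per-sample genotypes.
    print(variant.site.position, variant.genotypes)
    break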
def uncompress_files(original, destination): """ Move file from original path to destination path. :type original: str :param original: The location of zip file :type destination: str :param destination: The extract path """ with zipfile.ZipFile(original) as zips: extract_path = os.path.join(destination) zips.extractall(extract_path)
Move file from original path to destination path. :type original: str :param original: The location of zip file :type destination: str :param destination: The extract path
Below is the the instruction that describes the task: ### Input: Move file from original path to destination path. :type original: str :param original: The location of zip file :type destination: str :param destination: The extract path ### Response: def uncompress_files(original, destination): """ Move file from original path to destination path. :type original: str :param original: The location of zip file :type destination: str :param destination: The extract path """ with zipfile.ZipFile(original) as zips: extract_path = os.path.join(destination) zips.extractall(extract_path)
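Editor's usage note: a self-contained sketch that builds a tiny zip archive with the standard library and then extracts it with the helper above; all file names are throw-away examples. (Note that although the docstring says "Move file", the implementation extracts the archive and leaves the original zip in place.)

import os
import zipfile

# Create a small archive to extract (hypothetical file names).
os.makedirs("build", exist_ok=True)
with zipfile.ZipFile("build/payload.zip", "w") as zf:
    zf.writestr("hello.txt", "hello world\n")

# Extract everything under the destination directory.
uncompress_files("build/payload.zip", "build/extracted")
print(os.listdir("build/extracted"))  # ['hello.txt']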
def connect(self): "Connects to the Redis server if not already connected" if self._sock: return try: sock = self._connect() except socket.timeout: raise TimeoutError("Timeout connecting to server") except socket.error: e = sys.exc_info()[1] raise ConnectionError(self._error_message(e)) self._sock = sock self._selector = DefaultSelector(sock) try: self.on_connect() except RedisError: # clean up after any error in on_connect self.disconnect() raise # run any user callbacks. right now the only internal callback # is for pubsub channel/pattern resubscription for callback in self._connect_callbacks: callback(self)
Connects to the Redis server if not already connected
Below is the the instruction that describes the task: ### Input: Connects to the Redis server if not already connected ### Response: def connect(self): "Connects to the Redis server if not already connected" if self._sock: return try: sock = self._connect() except socket.timeout: raise TimeoutError("Timeout connecting to server") except socket.error: e = sys.exc_info()[1] raise ConnectionError(self._error_message(e)) self._sock = sock self._selector = DefaultSelector(sock) try: self.on_connect() except RedisError: # clean up after any error in on_connect self.disconnect() raise # run any user callbacks. right now the only internal callback # is for pubsub channel/pattern resubscription for callback in self._connect_callbacks: callback(self)
def can_cast_to(v: Literal, dt: str) -> bool: """ 5.4.3 Datatype Constraints Determine whether "a value of the lexical form of n can be cast to the target type v per XPath Functions 3.1 section 19 Casting[xpath-functions]." """ # TODO: rdflib doesn't appear to pay any attention to lengths (e.g. 257 is a valid XSD.byte) return v.value is not None and Literal(str(v), datatype=dt).value is not None
5.4.3 Datatype Constraints Determine whether "a value of the lexical form of n can be cast to the target type v per XPath Functions 3.1 section 19 Casting[xpath-functions]."
Below is the the instruction that describes the task: ### Input: 5.4.3 Datatype Constraints Determine whether "a value of the lexical form of n can be cast to the target type v per XPath Functions 3.1 section 19 Casting[xpath-functions]." ### Response: def can_cast_to(v: Literal, dt: str) -> bool: """ 5.4.3 Datatype Constraints Determine whether "a value of the lexical form of n can be cast to the target type v per XPath Functions 3.1 section 19 Casting[xpath-functions]." """ # TODO: rdflib doesn't appear to pay any attention to lengths (e.g. 257 is a valid XSD.byte) return v.value is not None and Literal(str(v), datatype=dt).value is not None
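Editor's usage note: a small rdflib sketch showing the intended behaviour of can_cast_to, assuming rdflib is installed. The last line illustrates the TODO in the function: facet-style length/range limits are not enforced, so 257 still "casts" to xsd:byte.

from rdflib import Literal
from rdflib.namespace import XSD

# "42" has a lexical form that parses as an integer, "abc" does not.
print(can_cast_to(Literal("42"), XSD.integer))    # True
print(can_cast_to(Literal("abc"), XSD.integer))   # False

# Range limits are not checked, as noted in the function's TODO comment.
print(can_cast_to(Literal("257"), XSD.byte))      # True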
def tag(self, alt='', use_size=None, **attrs): """ Return a standard XHTML ``<img ... />`` tag for this field. :param alt: The ``alt=""`` text for the tag. Defaults to ``''``. :param use_size: Whether to get the size of the thumbnail image for use in the tag attributes. If ``None`` (default), the size will only be used it if won't result in a remote file retrieval. All other keyword parameters are added as (properly escaped) extra attributes to the `img` tag. """ if use_size is None: if getattr(self, '_dimensions_cache', None): use_size = True else: try: self.storage.path(self.name) use_size = True except NotImplementedError: use_size = False attrs['alt'] = alt attrs['src'] = self.url if use_size: attrs.update(dict(width=self.width, height=self.height)) attrs = ' '.join(['%s="%s"' % (key, escape(value)) for key, value in sorted(attrs.items())]) return mark_safe('<img %s />' % attrs)
Return a standard XHTML ``<img ... />`` tag for this field. :param alt: The ``alt=""`` text for the tag. Defaults to ``''``. :param use_size: Whether to get the size of the thumbnail image for use in the tag attributes. If ``None`` (default), the size will only be used it if won't result in a remote file retrieval. All other keyword parameters are added as (properly escaped) extra attributes to the `img` tag.
Below is the the instruction that describes the task: ### Input: Return a standard XHTML ``<img ... />`` tag for this field. :param alt: The ``alt=""`` text for the tag. Defaults to ``''``. :param use_size: Whether to get the size of the thumbnail image for use in the tag attributes. If ``None`` (default), the size will only be used it if won't result in a remote file retrieval. All other keyword parameters are added as (properly escaped) extra attributes to the `img` tag. ### Response: def tag(self, alt='', use_size=None, **attrs): """ Return a standard XHTML ``<img ... />`` tag for this field. :param alt: The ``alt=""`` text for the tag. Defaults to ``''``. :param use_size: Whether to get the size of the thumbnail image for use in the tag attributes. If ``None`` (default), the size will only be used it if won't result in a remote file retrieval. All other keyword parameters are added as (properly escaped) extra attributes to the `img` tag. """ if use_size is None: if getattr(self, '_dimensions_cache', None): use_size = True else: try: self.storage.path(self.name) use_size = True except NotImplementedError: use_size = False attrs['alt'] = alt attrs['src'] = self.url if use_size: attrs.update(dict(width=self.width, height=self.height)) attrs = ' '.join(['%s="%s"' % (key, escape(value)) for key, value in sorted(attrs.items())]) return mark_safe('<img %s />' % attrs)
def exit_on_error(self, message, exit_code=1): # pylint: disable=no-self-use """Log generic message when getting an error and exit :param exit_code: if not None, exit with the provided value as exit code :type exit_code: int :param message: message for the exit reason :type message: str :return: None """ log = "I got an unrecoverable error. I have to exit." if message: log += "\n-----\nError message: %s" % message print("Error message: %s" % message) log += "-----\n" log += "You can get help at https://github.com/Alignak-monitoring/alignak\n" log += "If you think this is a bug, create a new issue including as much " \ "details as possible (version, configuration,...)" if exit_code is not None: exit(exit_code)
Log generic message when getting an error and exit :param exit_code: if not None, exit with the provided value as exit code :type exit_code: int :param message: message for the exit reason :type message: str :return: None
Below is the the instruction that describes the task: ### Input: Log generic message when getting an error and exit :param exit_code: if not None, exit with the provided value as exit code :type exit_code: int :param message: message for the exit reason :type message: str :return: None ### Response: def exit_on_error(self, message, exit_code=1): # pylint: disable=no-self-use """Log generic message when getting an error and exit :param exit_code: if not None, exit with the provided value as exit code :type exit_code: int :param message: message for the exit reason :type message: str :return: None """ log = "I got an unrecoverable error. I have to exit." if message: log += "\n-----\nError message: %s" % message print("Error message: %s" % message) log += "-----\n" log += "You can get help at https://github.com/Alignak-monitoring/alignak\n" log += "If you think this is a bug, create a new issue including as much " \ "details as possible (version, configuration,...)" if exit_code is not None: exit(exit_code)
def assemble_cx(): """Assemble INDRA Statements and return CX network json.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) ca = CxAssembler(stmts) model_str = ca.make_model() res = {'model': model_str} return res
Assemble INDRA Statements and return CX network json.
Below is the the instruction that describes the task: ### Input: Assemble INDRA Statements and return CX network json. ### Response: def assemble_cx(): """Assemble INDRA Statements and return CX network json.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) ca = CxAssembler(stmts) model_str = ca.make_model() res = {'model': model_str} return res
def get_nodes(self, request): """ This method is used to build the menu tree. """ nodes = [] for shiny_app in ShinyApp.objects.all(): node = NavigationNode( shiny_app.name, reverse('cms_shiny:shiny_detail', args=(shiny_app.slug,)), shiny_app.slug ) nodes.append(node) return nodes
This method is used to build the menu tree.
Below is the the instruction that describes the task: ### Input: This method is used to build the menu tree. ### Response: def get_nodes(self, request): """ This method is used to build the menu tree. """ nodes = [] for shiny_app in ShinyApp.objects.all(): node = NavigationNode( shiny_app.name, reverse('cms_shiny:shiny_detail', args=(shiny_app.slug,)), shiny_app.slug ) nodes.append(node) return nodes
def reset( self ): """ Resets the user interface buttons for this widget. """ # clear previous widgets for btn in self.findChildren(QToolButton): btn.close() btn.setParent(None) btn.deleteLater() # determine coloring options palette = self.palette() unchecked = palette.color(palette.Button) # determine if this is a dark or light scheme avg = (unchecked.red() + unchecked.green() + unchecked.blue()) / 3.0 if ( avg < 140 ): checked = unchecked.lighter(115) checked_clr = self.colorString(unchecked.lighter(120)) border_clr = self.colorString(unchecked.darker(140)) unchecked_clr = self.colorString(checked.lighter(140)) unchecked_clr_alt = self.colorString(checked.lighter(120)) checked_clr_alt = self.colorString(unchecked) else: checked = unchecked.lighter(120) checked_clr = self.colorString(unchecked) border_clr = self.colorString(unchecked.darker(160)) unchecked_clr = self.colorString(checked) unchecked_clr_alt = self.colorString(checked.darker(130)) checked_clr_alt = self.colorString(unchecked.darker(120)) # define the stylesheet options options = {} options['top_left_radius'] = 0 options['top_right_radius'] = 0 options['bot_left_radius'] = 0 options['bot_right_radius'] = 0 options['border_color'] = border_clr options['checked_clr'] = checked_clr options['checked_clr_alt'] = checked_clr_alt options['unchecked_clr'] = unchecked_clr options['unchecked_clr_alt'] = unchecked_clr_alt options['padding_top'] = 1 options['padding_bottom'] = 1 options['padding_left'] = 1 options['padding_right'] = 1 horiz = self.direction() in (QBoxLayout.LeftToRight, QBoxLayout.RightToLeft) if ( horiz ): options['x1'] = 0 options['y1'] = 0 options['x2'] = 0 options['y2'] = 1 else: options['x1'] = 0 options['y1'] = 0 options['x2'] = 1 options['y2'] = 1 actions = self.actionGroup().actions() count = len(actions) for i, action in enumerate(actions): btn = QToolButton(self) btn.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) btn.setDefaultAction(action) self.layout().insertWidget(i, btn) options['top_left_radius'] = 1 options['bot_left_radius'] = 1 options['top_right_radius'] = 1 options['bot_right_radius'] = 1 if ( horiz ): options['padding_left'] = self._padding options['padding_right'] = self._padding else: options['padding_top'] = self._padding options['padding_bottom'] = self._padding if ( not i ): if ( horiz ): options['top_left_radius'] = self.cornerRadius() options['bot_left_radius'] = self.cornerRadius() options['padding_left'] += self.cornerRadius() / 3.0 else: options['top_left_radius'] = self.cornerRadius() options['top_right_radius'] = self.cornerRadius() options['padding_top'] += self.cornerRadius() / 3.0 elif ( i == count - 1 ): if ( horiz ): options['top_right_radius'] = self.cornerRadius() options['bot_right_radius'] = self.cornerRadius() options['padding_right'] += self.cornerRadius() / 3.0 else: options['bot_left_radius'] = self.cornerRadius() options['bot_right_radius'] = self.cornerRadius() options['padding_bottom'] += self.cornerRadius() / 3.0 btn.setStyleSheet(TOOLBUTTON_STYLE % options) btn.setAutoFillBackground(True)
Resets the user interface buttons for this widget.
Below is the the instruction that describes the task: ### Input: Resets the user interface buttons for this widget. ### Response: def reset( self ): """ Resets the user interface buttons for this widget. """ # clear previous widgets for btn in self.findChildren(QToolButton): btn.close() btn.setParent(None) btn.deleteLater() # determine coloring options palette = self.palette() unchecked = palette.color(palette.Button) # determine if this is a dark or light scheme avg = (unchecked.red() + unchecked.green() + unchecked.blue()) / 3.0 if ( avg < 140 ): checked = unchecked.lighter(115) checked_clr = self.colorString(unchecked.lighter(120)) border_clr = self.colorString(unchecked.darker(140)) unchecked_clr = self.colorString(checked.lighter(140)) unchecked_clr_alt = self.colorString(checked.lighter(120)) checked_clr_alt = self.colorString(unchecked) else: checked = unchecked.lighter(120) checked_clr = self.colorString(unchecked) border_clr = self.colorString(unchecked.darker(160)) unchecked_clr = self.colorString(checked) unchecked_clr_alt = self.colorString(checked.darker(130)) checked_clr_alt = self.colorString(unchecked.darker(120)) # define the stylesheet options options = {} options['top_left_radius'] = 0 options['top_right_radius'] = 0 options['bot_left_radius'] = 0 options['bot_right_radius'] = 0 options['border_color'] = border_clr options['checked_clr'] = checked_clr options['checked_clr_alt'] = checked_clr_alt options['unchecked_clr'] = unchecked_clr options['unchecked_clr_alt'] = unchecked_clr_alt options['padding_top'] = 1 options['padding_bottom'] = 1 options['padding_left'] = 1 options['padding_right'] = 1 horiz = self.direction() in (QBoxLayout.LeftToRight, QBoxLayout.RightToLeft) if ( horiz ): options['x1'] = 0 options['y1'] = 0 options['x2'] = 0 options['y2'] = 1 else: options['x1'] = 0 options['y1'] = 0 options['x2'] = 1 options['y2'] = 1 actions = self.actionGroup().actions() count = len(actions) for i, action in enumerate(actions): btn = QToolButton(self) btn.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) btn.setDefaultAction(action) self.layout().insertWidget(i, btn) options['top_left_radius'] = 1 options['bot_left_radius'] = 1 options['top_right_radius'] = 1 options['bot_right_radius'] = 1 if ( horiz ): options['padding_left'] = self._padding options['padding_right'] = self._padding else: options['padding_top'] = self._padding options['padding_bottom'] = self._padding if ( not i ): if ( horiz ): options['top_left_radius'] = self.cornerRadius() options['bot_left_radius'] = self.cornerRadius() options['padding_left'] += self.cornerRadius() / 3.0 else: options['top_left_radius'] = self.cornerRadius() options['top_right_radius'] = self.cornerRadius() options['padding_top'] += self.cornerRadius() / 3.0 elif ( i == count - 1 ): if ( horiz ): options['top_right_radius'] = self.cornerRadius() options['bot_right_radius'] = self.cornerRadius() options['padding_right'] += self.cornerRadius() / 3.0 else: options['bot_left_radius'] = self.cornerRadius() options['bot_right_radius'] = self.cornerRadius() options['padding_bottom'] += self.cornerRadius() / 3.0 btn.setStyleSheet(TOOLBUTTON_STYLE % options) btn.setAutoFillBackground(True)
def multiget_slice(self, keys, column_parent, predicate, consistency_level): """ Performs a get_slice for column_parent and predicate for the given keys in parallel. Parameters: - keys - column_parent - predicate - consistency_level """ self._seqid += 1 d = self._reqs[self._seqid] = defer.Deferred() self.send_multiget_slice(keys, column_parent, predicate, consistency_level) return d
Performs a get_slice for column_parent and predicate for the given keys in parallel. Parameters: - keys - column_parent - predicate - consistency_level
Below is the the instruction that describes the task: ### Input: Performs a get_slice for column_parent and predicate for the given keys in parallel. Parameters: - keys - column_parent - predicate - consistency_level ### Response: def multiget_slice(self, keys, column_parent, predicate, consistency_level): """ Performs a get_slice for column_parent and predicate for the given keys in parallel. Parameters: - keys - column_parent - predicate - consistency_level """ self._seqid += 1 d = self._reqs[self._seqid] = defer.Deferred() self.send_multiget_slice(keys, column_parent, predicate, consistency_level) return d
def options(self, parser, env=None): """ Sphinx config file that can optionally take the following python template string arguments: ``database_name`` ``database_password`` ``database_username`` ``database_host`` ``database_port`` ``sphinx_search_data_dir`` ``searchd_log_dir`` """ if env is None: env = os.environ parser.add_option( '--sphinx-config-tpl', help='Path to the Sphinx configuration file template.', ) super(SphinxSearchPlugin, self).options(parser, env)
Sphinx config file that can optionally take the following python template string arguments: ``database_name`` ``database_password`` ``database_username`` ``database_host`` ``database_port`` ``sphinx_search_data_dir`` ``searchd_log_dir``
Below is the the instruction that describes the task: ### Input: Sphinx config file that can optionally take the following python template string arguments: ``database_name`` ``database_password`` ``database_username`` ``database_host`` ``database_port`` ``sphinx_search_data_dir`` ``searchd_log_dir`` ### Response: def options(self, parser, env=None): """ Sphinx config file that can optionally take the following python template string arguments: ``database_name`` ``database_password`` ``database_username`` ``database_host`` ``database_port`` ``sphinx_search_data_dir`` ``searchd_log_dir`` """ if env is None: env = os.environ parser.add_option( '--sphinx-config-tpl', help='Path to the Sphinx configuration file template.', ) super(SphinxSearchPlugin, self).options(parser, env)
def lock(self, lock_name, timeout=900): """ Attempt to use lock and unlock, which will work if the Cache is Redis, but fall back to a memcached-compliant add/delete approach. If the Jobtastic Cache isn't Redis or Memcache, or another product with a compatible lock or add/delete API, then a custom locking function will be required. However, Redis and Memcache are expected to account for the vast majority of installations. See: - http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html - http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA """ # Try Redis first try: try: lock = self.cache.lock except AttributeError: try: # Possibly using old Django-Redis lock = self.cache.client.lock except AttributeError: # Possibly using Werkzeug + Redis lock = self.cache._client.lock have_lock = False lock = lock(lock_name, timeout=timeout) try: have_lock = lock.acquire(blocking=True) if have_lock: yield finally: if have_lock: lock.release() except AttributeError: # No lock method on the cache, so fall back to add have_lock = False try: while not have_lock: have_lock = self.cache.add(lock_name, 'locked', timeout) if have_lock: yield finally: if have_lock: self.cache.delete(lock_name)
Attempt to use lock and unlock, which will work if the Cache is Redis, but fall back to a memcached-compliant add/delete approach. If the Jobtastic Cache isn't Redis or Memcache, or another product with a compatible lock or add/delete API, then a custom locking function will be required. However, Redis and Memcache are expected to account for the vast majority of installations. See: - http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html - http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA
Below is the the instruction that describes the task: ### Input: Attempt to use lock and unlock, which will work if the Cache is Redis, but fall back to a memcached-compliant add/delete approach. If the Jobtastic Cache isn't Redis or Memcache, or another product with a compatible lock or add/delete API, then a custom locking function will be required. However, Redis and Memcache are expected to account for the vast majority of installations. See: - http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html - http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA ### Response: def lock(self, lock_name, timeout=900): """ Attempt to use lock and unlock, which will work if the Cache is Redis, but fall back to a memcached-compliant add/delete approach. If the Jobtastic Cache isn't Redis or Memcache, or another product with a compatible lock or add/delete API, then a custom locking function will be required. However, Redis and Memcache are expected to account for the vast majority of installations. See: - http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html - http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA """ # Try Redis first try: try: lock = self.cache.lock except AttributeError: try: # Possibly using old Django-Redis lock = self.cache.client.lock except AttributeError: # Possibly using Werkzeug + Redis lock = self.cache._client.lock have_lock = False lock = lock(lock_name, timeout=timeout) try: have_lock = lock.acquire(blocking=True) if have_lock: yield finally: if have_lock: lock.release() except AttributeError: # No lock method on the cache, so fall back to add have_lock = False try: while not have_lock: have_lock = self.cache.add(lock_name, 'locked', timeout) if have_lock: yield finally: if have_lock: self.cache.delete(lock_name)
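Editor's usage note: the yield in the method above means it is meant to be used as a context manager (the contextmanager decorator is presumably applied where the method is defined). `task_cache` stands in for an instance of the class that defines lock(), and the lock name and critical-section function are hypothetical.

# task_cache is assumed to be an instance of the class defining lock().
with task_cache.lock("reports:nightly-rollup", timeout=300):
    # Only one worker at a time gets past this point for the given name;
    # others block on the Redis lock or keep retrying cache.add() in the
    # memcached fallback. generate_nightly_rollup() is a placeholder for
    # the work that must not run concurrently.
    generate_nightly_rollup()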
def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.CopyJob` :returns: Job parsed from ``resource``. """ job_id, config_resource = cls._get_resource_config(resource) config = CopyJobConfig.from_api_repr(config_resource) # Copy required fields to the job. copy_resource = config_resource["copy"] destination = TableReference.from_api_repr(copy_resource["destinationTable"]) sources = [] source_configs = copy_resource.get("sourceTables") if source_configs is None: single = copy_resource.get("sourceTable") if single is None: raise KeyError("Resource missing 'sourceTables' / 'sourceTable'") source_configs = [single] for source_config in source_configs: table_ref = TableReference.from_api_repr(source_config) sources.append(table_ref) job = cls(job_id, sources, destination, client=client, job_config=config) job._set_properties(resource) return job
Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.CopyJob` :returns: Job parsed from ``resource``.
Below is the the instruction that describes the task: ### Input: Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.CopyJob` :returns: Job parsed from ``resource``. ### Response: def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.CopyJob` :returns: Job parsed from ``resource``. """ job_id, config_resource = cls._get_resource_config(resource) config = CopyJobConfig.from_api_repr(config_resource) # Copy required fields to the job. copy_resource = config_resource["copy"] destination = TableReference.from_api_repr(copy_resource["destinationTable"]) sources = [] source_configs = copy_resource.get("sourceTables") if source_configs is None: single = copy_resource.get("sourceTable") if single is None: raise KeyError("Resource missing 'sourceTables' / 'sourceTable'") source_configs = [single] for source_config in source_configs: table_ref = TableReference.from_api_repr(source_config) sources.append(table_ref) job = cls(job_id, sources, destination, client=client, job_config=config) job._set_properties(resource) return job
def get_plugins_by_feature(features):
    """
    Returns a list of plugin names where the plugins implement at least one
    of the *features*.

    *features* must be a list of Plugin methods, e.g. [Plugin.postprocess_testrun,
    Plugin.postprocess_testjob]
    """
    if not features:
        return get_all_plugins()
    plugins = PluginLoader.load_all().items()
    names = set([f.__name__ for f in features])
    return [e for e, plugin in plugins if names & set(plugin.__dict__.keys())]
Returns a list of plugin names where the plugins implement at least one
of the *features*.

*features* must be a list of Plugin methods, e.g. [Plugin.postprocess_testrun,
Plugin.postprocess_testjob]
Below is the the instruction that describes the task:
### Input:
Returns a list of plugin names where the plugins implement at least one
of the *features*.

*features* must be a list of Plugin methods, e.g. [Plugin.postprocess_testrun,
Plugin.postprocess_testjob]
### Response:
def get_plugins_by_feature(features):
    """
    Returns a list of plugin names where the plugins implement at least one
    of the *features*.

    *features* must be a list of Plugin methods, e.g. [Plugin.postprocess_testrun,
    Plugin.postprocess_testjob]
    """
    if not features:
        return get_all_plugins()
    plugins = PluginLoader.load_all().items()
    names = set([f.__name__ for f in features])
    return [e for e, plugin in plugins if names & set(plugin.__dict__.keys())]
def unit_conversion(current, desired): """ Calculate the conversion from one set of units to another. Parameters --------- current : str Unit system values are in now (eg 'millimeters') desired : str Unit system we'd like values in (eg 'inches') Returns --------- conversion : float Number to multiply by to put values into desired units """ current = str(current).strip().lower() desired = str(desired).strip().lower() conversion = TO_INCH[current] / TO_INCH[desired] return conversion
Calculate the conversion from one set of units to another. Parameters --------- current : str Unit system values are in now (eg 'millimeters') desired : str Unit system we'd like values in (eg 'inches') Returns --------- conversion : float Number to multiply by to put values into desired units
Below is the the instruction that describes the task: ### Input: Calculate the conversion from one set of units to another. Parameters --------- current : str Unit system values are in now (eg 'millimeters') desired : str Unit system we'd like values in (eg 'inches') Returns --------- conversion : float Number to multiply by to put values into desired units ### Response: def unit_conversion(current, desired): """ Calculate the conversion from one set of units to another. Parameters --------- current : str Unit system values are in now (eg 'millimeters') desired : str Unit system we'd like values in (eg 'inches') Returns --------- conversion : float Number to multiply by to put values into desired units """ current = str(current).strip().lower() desired = str(desired).strip().lower() conversion = TO_INCH[current] / TO_INCH[desired] return conversion
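Editor's usage note: a quick sketch of the conversion-factor pattern; it assumes the TO_INCH lookup used by unit_conversion contains entries for the unit names passed in.

# Convert a few lengths from millimeters to inches by multiplying once
# with the factor returned by unit_conversion.
factor = unit_conversion("millimeters", "inches")
lengths_mm = [25.4, 100.0, 3.2]
lengths_in = [value * factor for value in lengths_mm]
print(lengths_in)  # first entry should be 1.0, since 25.4 mm == 1 inch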
def factorize(self): """Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint beta >=0, sum(beta)=1, H >=0, sum(H)=1 """ # compute new coefficients for reconstructing data points self.update_w() # for CHNMF it is sometimes useful to only compute # the basis vectors if self._compute_h: self.update_h() self.W = self.mdl.W self.H = self.mdl.H self.ferr = np.zeros(1) self.ferr[0] = self.mdl.frobenius_norm() self._print_cur_status(' Fro:' + str(self.ferr[0]))
Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint beta >=0, sum(beta)=1, H >=0, sum(H)=1
Below is the instruction that describes the task: ### Input: Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint beta >=0, sum(beta)=1, H >=0, sum(H)=1 ### Response: def factorize(self):
    """Do factorization s.t. data = dot(dot(data,beta),H), under the convexity
    constraint beta >=0, sum(beta)=1, H >=0, sum(H)=1
    """
    # compute new coefficients for reconstructing data points
    self.update_w()

    # for CHNMF it is sometimes useful to only compute
    # the basis vectors
    if self._compute_h:
        self.update_h()

    self.W = self.mdl.W
    self.H = self.mdl.H

    self.ferr = np.zeros(1)
    self.ferr[0] = self.mdl.frobenius_norm()
    self._print_cur_status(' Fro:' + str(self.ferr[0]))
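The only number this wrapper records is the reconstruction error, so it is worth spelling out what ferr[0] measures: the Frobenius norm of data - W H after the update steps. A rough, self-contained sketch with made-up shapes and random matrices (in the real CHNMF model the columns of W are themselves convex combinations of data columns):

import numpy as np

rng = np.random.default_rng(0)
data = rng.random((10, 50))   # 10 features x 50 samples
W = rng.random((10, 3))       # basis vectors
H = rng.random((3, 50))       # coefficients

ferr = np.linalg.norm(data - W @ H, ord='fro')  # what ferr[0] reports
print(ferr)
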
def main(sleep_length=0.1):
    """Log to stdout using python logging in a while loop"""
    log = logging.getLogger('sip.examples.log_spammer')
    log.info('Starting to spam log messages every %fs', sleep_length)
    counter = 0
    try:
        while True:
            log.info('Hello %06i (log_spammer: %s, sip logging: %s)',
                     counter, _version.__version__, __version__)
            counter += 1
            time.sleep(sleep_length)
    except KeyboardInterrupt:
        log.info('Exiting...')

Log to stdout using python logging in a while loop
Below is the instruction that describes the task: ### Input: Log to stdout using python logging in a while loop ### Response: def main(sleep_length=0.1):
    """Log to stdout using python logging in a while loop"""
    log = logging.getLogger('sip.examples.log_spammer')
    log.info('Starting to spam log messages every %fs', sleep_length)
    counter = 0
    try:
        while True:
            log.info('Hello %06i (log_spammer: %s, sip logging: %s)',
                     counter, _version.__version__, __version__)
            counter += 1
            time.sleep(sleep_length)
    except KeyboardInterrupt:
        log.info('Exiting...')
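Nothing reaches stdout unless a logging handler is configured before main() is called. A quick sketch of one way to do that is below; the format string is arbitrary and the actual call is left commented out because it loops until Ctrl+C:

import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format='%(asctime)s %(name)s %(levelname)s: %(message)s')

# main(sleep_length=0.5)  # would then emit a numbered log line every 0.5 s until interrupted
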
def build_groups(self, tokens):
    """Build dict of groups from list of tokens"""
    groups = {}
    for token in tokens:
        match_type = MatchType.start if token.group_end else MatchType.single
        groups[token.group_start] = (token, match_type)
        if token.group_end:
            groups[token.group_end] = (token, MatchType.end)
    return groups

Build dict of groups from list of tokens
Below is the instruction that describes the task: ### Input: Build dict of groups from list of tokens ### Response: def build_groups(self, tokens):
    """Build dict of groups from list of tokens"""
    groups = {}
    for token in tokens:
        match_type = MatchType.start if token.group_end else MatchType.single
        groups[token.group_start] = (token, match_type)
        if token.group_end:
            groups[token.group_end] = (token, MatchType.end)
    return groups
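The method only touches two token attributes (group_start, group_end) and three MatchType members, so it can be exercised with stand-ins. The Token namedtuple and MatchType enum below are invented here purely to make the sketch self-contained:

from collections import namedtuple
from enum import Enum

class MatchType(Enum):
    single = 1
    start = 2
    end = 3

Token = namedtuple('Token', ['group_start', 'group_end'])

def build_groups(tokens):
    groups = {}
    for token in tokens:
        match_type = MatchType.start if token.group_end else MatchType.single
        groups[token.group_start] = (token, match_type)
        if token.group_end:
            groups[token.group_end] = (token, MatchType.end)
    return groups

tokens = [Token('(', ')'), Token('-', None)]
groups = build_groups(tokens)
print(groups['('][1], groups[')'][1], groups['-'][1])
# MatchType.start MatchType.end MatchType.single
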
def keywords(text, cloud=None, batch=False, api_key=None, version=2, batch_size=None, **kwargs):
    """
    Given input text, returns a series of keywords and associated scores

    Example usage:

    .. code-block:: python

       >>> import indicoio
       >>> import numpy as np
       >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.'
       >>> keywords = indicoio.keywords(text, top_n=3)
       >>> print "The keywords are: "+str(keywords.keys())
       The keywords are: ['delightful', 'highs', 'skies']

    :param text: The text to be analyzed.
    :type text: str or unicode
    :rtype: Dictionary of feature score pairs
    """
    if kwargs.get("language", "english") != "english":
        version = 1

    url_params = {"batch": batch, "api_key": api_key, "version": version}
    return api_handler(text, cloud=cloud, api="keywords", url_params=url_params, batch_size=batch_size, **kwargs)

Given input text, returns a series of keywords and associated scores

Example usage:

.. code-block:: python

   >>> import indicoio
   >>> import numpy as np
   >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.'
   >>> keywords = indicoio.keywords(text, top_n=3)
   >>> print "The keywords are: "+str(keywords.keys())
   The keywords are: ['delightful', 'highs', 'skies']

:param text: The text to be analyzed.
:type text: str or unicode
:rtype: Dictionary of feature score pairs

Below is the instruction that describes the task: ### Input: Given input text, returns a series of keywords and associated scores

Example usage:

.. code-block:: python

   >>> import indicoio
   >>> import numpy as np
   >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.'
   >>> keywords = indicoio.keywords(text, top_n=3)
   >>> print "The keywords are: "+str(keywords.keys())
   The keywords are: ['delightful', 'highs', 'skies']

:param text: The text to be analyzed.
:type text: str or unicode
:rtype: Dictionary of feature score pairs
### Response: def keywords(text, cloud=None, batch=False, api_key=None, version=2, batch_size=None, **kwargs):
    """
    Given input text, returns a series of keywords and associated scores

    Example usage:

    .. code-block:: python

       >>> import indicoio
       >>> import numpy as np
       >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.'
       >>> keywords = indicoio.keywords(text, top_n=3)
       >>> print "The keywords are: "+str(keywords.keys())
       The keywords are: ['delightful', 'highs', 'skies']

    :param text: The text to be analyzed.
    :type text: str or unicode
    :rtype: Dictionary of feature score pairs
    """
    if kwargs.get("language", "english") != "english":
        version = 1

    url_params = {"batch": batch, "api_key": api_key, "version": version}
    return api_handler(text, cloud=cloud, api="keywords", url_params=url_params, batch_size=batch_size, **kwargs)
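A hedged usage sketch follows. It assumes a valid indico API key set through indicoio.config.api_key (the client's usual configuration hook, not verified here), and the second call illustrates the automatic fall-back to version 1 that the language check above performs for non-English text:

import indicoio

indicoio.config.api_key = 'YOUR_API_KEY'  # placeholder

scores = indicoio.keywords('Delightful with mostly sunny skies.', top_n=3)

# Non-English input is silently routed to version 1 of the endpoint:
scores_es = indicoio.keywords('Cielo despejado y soleado.', language='spanish', top_n=3)
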
def extract_name_from_job_arn(arn):
    """Returns the name used in the API given a full ARN for a training job
    or hyperparameter tuning job.
    """
    slash_pos = arn.find('/')
    if slash_pos == -1:
        raise ValueError("Cannot parse invalid ARN: %s" % arn)
    return arn[(slash_pos + 1):]

Returns the name used in the API given a full ARN for a training job or hyperparameter tuning job.
Below is the instruction that describes the task: ### Input: Returns the name used in the API given a full ARN for a training job or hyperparameter tuning job. ### Response: def extract_name_from_job_arn(arn):
    """Returns the name used in the API given a full ARN for a training job
    or hyperparameter tuning job.
    """
    slash_pos = arn.find('/')
    if slash_pos == -1:
        raise ValueError("Cannot parse invalid ARN: %s" % arn)
    return arn[(slash_pos + 1):]
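The returned name is simply everything after the first '/' in the ARN, since the earlier fields are colon-separated. A tiny illustration with a made-up ARN:

# mirrors extract_name_from_job_arn(arn)
arn = 'arn:aws:sagemaker:us-west-2:123456789012:training-job/my-training-job'
print(arn[arn.find('/') + 1:])  # my-training-job
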
def libvlc_audio_set_mute(p_mi, status):
    '''Set mute status.
    @param p_mi: media player.
    @param status: If status is true then mute, otherwise unmute
    @warning This function does not always work. If there are no active audio
    playback streams, the mute status might not be available. If digital
    pass-through (S/PDIF, HDMI...) is in use, muting may be inapplicable. Also
    some audio output plugins do not support muting at all.
    @note To force silent playback, disable all audio tracks. This is more
    efficient and reliable than mute.
    '''
    f = _Cfunctions.get('libvlc_audio_set_mute', None) or \
        _Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
                   None, MediaPlayer, ctypes.c_int)
    return f(p_mi, status)

Set mute status.
@param p_mi: media player.
@param status: If status is true then mute, otherwise unmute
@warning This function does not always work. If there are no active audio playback streams, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be inapplicable. Also some audio output plugins do not support muting at all.
@note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.

Below is the instruction that describes the task: ### Input: Set mute status.
@param p_mi: media player.
@param status: If status is true then mute, otherwise unmute
@warning This function does not always work. If there are no active audio playback streams, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be inapplicable. Also some audio output plugins do not support muting at all.
@note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
### Response: def libvlc_audio_set_mute(p_mi, status):
    '''Set mute status.
    @param p_mi: media player.
    @param status: If status is true then mute, otherwise unmute
    @warning This function does not always work. If there are no active audio
    playback streams, the mute status might not be available. If digital
    pass-through (S/PDIF, HDMI...) is in use, muting may be inapplicable. Also
    some audio output plugins do not support muting at all.
    @note To force silent playback, disable all audio tracks. This is more
    efficient and reliable than mute.
    '''
    f = _Cfunctions.get('libvlc_audio_set_mute', None) or \
        _Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
                   None, MediaPlayer, ctypes.c_int)
    return f(p_mi, status)
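In the python-vlc bindings this ctypes stub is normally reached through the MediaPlayer wrapper rather than called directly. A hedged usage sketch, with a placeholder media path:

import time
import vlc

player = vlc.MediaPlayer('example.mp3')  # placeholder path
player.play()
time.sleep(1)                 # give playback a moment to start an audio stream
player.audio_set_mute(1)      # mute; may be ignored if no audio stream is active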