Dataset overview: each sample below is a `text` field (a code string, 78 to 104k characters long) followed by its `score` (a float64 between 0 and 0.18).
def add_record(self, msg_id, rec):
    """Add a new Task Record, by msg_id."""
    if msg_id in self._records:
        raise KeyError("Already have msg_id %r" % (msg_id,))
    self._records[msg_id] = rec
0.018018
def run_cli(
        executable,
        mets_url=None,
        resolver=None,
        workspace=None,
        page_id=None,
        log_level=None,
        input_file_grp=None,
        output_file_grp=None,
        parameter=None,
        working_dir=None,
):
    """
    Create a workspace for mets_url and run MP CLI through it
    """
    workspace = _get_workspace(workspace, resolver, mets_url, working_dir)
    args = [executable, '--working-dir', workspace.directory]
    args += ['--mets', mets_url]
    if log_level:
        args += ['--log-level', log_level]
    if page_id:
        args += ['--page-id', page_id]
    if input_file_grp:
        args += ['--input-file-grp', input_file_grp]
    if output_file_grp:
        args += ['--output-file-grp', output_file_grp]
    if parameter:
        args += ['--parameter', parameter]
    log.debug("Running subprocess '%s'", ' '.join(args))
    return subprocess.call(args)
0.001075
def grid_select(self, grid, clear_selection=True):
    """Selects cells of grid with selection content"""
    if clear_selection:
        grid.ClearSelection()

    for (tl, br) in zip(self.block_tl, self.block_br):
        grid.SelectBlock(tl[0], tl[1], br[0], br[1], addToSelected=True)

    for row in self.rows:
        grid.SelectRow(row, addToSelected=True)

    for col in self.cols:
        grid.SelectCol(col, addToSelected=True)

    for cell in self.cells:
        grid.SelectBlock(cell[0], cell[1], cell[0], cell[1],
                         addToSelected=True)
0.003215
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_port_nn(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
    config = get_vmpolicy_macaddr
    output = ET.SubElement(get_vmpolicy_macaddr, "output")
    vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
    port_nn = ET.SubElement(vmpolicy_macaddr, "port-nn")
    port_nn.text = kwargs.pop('port_nn')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003407
def compile_foreign(self, blueprint, command, _):
    """
    Compile a foreign key command.

    :param blueprint: The blueprint
    :type blueprint: Blueprint

    :param command: The command
    :type command: Fluent

    :rtype: str
    """
    table = self.wrap_table(blueprint)
    on = self.wrap_table(command.on)
    columns = self.columnize(command.columns)
    on_columns = self.columnize(
        command.references if isinstance(command.references, list)
        else [command.references])

    sql = 'ALTER TABLE %s ADD CONSTRAINT %s ' % (table, command.index)
    sql += 'FOREIGN KEY (%s) REFERENCES %s (%s)' % (columns, on, on_columns)

    if command.get('on_delete'):
        sql += ' ON DELETE %s' % command.on_delete

    if command.get('on_update'):
        sql += ' ON UPDATE %s' % command.on_update

    return sql
0.00309
def load_project(self, path, load=True):
    """
    Load a project from a .gns3

    :param path: Path of the .gns3
    :param load: Load the topology
    """
    topo_data = load_topology(path)
    topo_data.pop("topology")
    topo_data.pop("version")
    topo_data.pop("revision")
    topo_data.pop("type")

    if topo_data["project_id"] in self._projects:
        project = self._projects[topo_data["project_id"]]
    else:
        project = yield from self.add_project(
            path=os.path.dirname(path), status="closed",
            filename=os.path.basename(path), **topo_data)
    if load or project.auto_open:
        yield from project.open()
    return project
0.004161
def get_comments_content_object(parser, token):
    """
    Get a limited set of comments for a given object.
    Defaults to a limit of 5. Setting the limit to -1 disables limiting.

    usage:

        {% get_comments_content_object for form_object as variable_name %}
    """
    keywords = token.contents.split()
    if len(keywords) != 5:
        raise template.TemplateSyntaxError(
            "'%s' tag takes exactly 2 arguments" % (keywords[0],))
    if keywords[1] != 'for':
        raise template.TemplateSyntaxError(
            "first argument to '%s' tag must be 'for'" % (keywords[0],))
    if keywords[3] != 'as':
        raise template.TemplateSyntaxError(
            "third argument to '%s' tag must be 'as'" % (keywords[0],))
    return GetCommentsContentObject(keywords[2], keywords[4])
0.001239
def libvlc_media_get_mrl(p_md):
    '''Get the media resource locator (mrl) from a media descriptor object.
    @param p_md: a media descriptor object.
    @return: string with mrl of media descriptor object.
    '''
    f = _Cfunctions.get('libvlc_media_get_mrl', None) or \
        _Cfunction('libvlc_media_get_mrl', ((1,),), string_result,
                   ctypes.c_void_p, Media)
    return f(p_md)
0.004938
def sort_idx(m, reverse=False):
    """Return the indices of m in sorted order (default: ascending order)"""
    return sorted(range(len(m)), key=lambda k: m[k], reverse=reverse)
0.005618
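The sort_idx function above returns the positions that would sort the sequence, in the spirit of numpy.argsort. A quick illustration:

    sort_idx([30, 10, 20])                 # [1, 2, 0] -- values 10, 20, 30
    sort_idx([30, 10, 20], reverse=True)   # [0, 2, 1] -- values 30, 20, 10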
def andeshelp(group=None, category=None, model_list=None,
              model_format=None, model_var=None, quick_help=None,
              help_option=None, help_config=None, export='plain', **kwargs):
    """
    Print the requested help and documentation to stdout.

    Parameters
    ----------
    group : None or str
        Name of a group whose model names will be printed
    category : None or str
        Name of a category whose models will be printed
    model_list : bool
        If ``True``, print the full model list.
    model_format : None or str
        Names of models whose parameter definitions will be printed.
        Model names are separated by comma without space.
    model_var : None or str
        A pair of model name and parameter name separated by dot. For
        example, ``Bus.voltage`` stands for the ``voltage`` parameter
        of ``Bus``.
    quick_help : None or str
        Name of a model to print a quick help of parameter definitions.
    help_option : None or str
        A pair of config name and option name separated by dot. For
        example, ``System.sparselib`` stands for the ``sparselib``
        option of the ``System`` config.
    help_config : None or str
        Config names whose option definitions will be printed. Configs
        are separated by comma without space. For example,
        ``System,Pflow``. In the naming convention, the first letter is
        capitalized.
    export : None or {'plain', 'latex'}
        Formatting style available in plain text or LaTeX format. This
        option has not been implemented.
    kwargs : None or dict
        Other keyword arguments

    Returns
    -------
    bool
        True if any help function is executed.

    Notes
    -----
    The outputs can be written to a text file using shell command,
    for example, ::

        andes -q Bus > bus_help.txt
    """
    out = []
    if not (group or category or model_list or model_format
            or model_var or quick_help or help_option or help_config):
        return False

    from .models import all_models_list

    if category:
        raise NotImplementedError

    if model_list:
        raise NotImplementedError

    system = PowerSystem()

    if model_format:
        if model_format.lower() == 'all':
            model_format = all_models_list
        else:
            model_format = model_format.split(',')

        # iterate over a copy so removals do not skip items
        for item in list(model_format):
            if item not in all_models_list:
                logger.warning('Model <{}> does not exist.'.format(item))
                model_format.remove(item)

        if len(model_format) > 0:
            for item in model_format:
                out.append(system.__dict__[item].doc(export=export))

    if model_var:
        model_var = model_var.split('.')

        if len(model_var) == 1:
            logger.error('Model and parameter not separated by dot.')

        elif len(model_var) > 2:
            logger.error('Model parameter not specified correctly.')

        else:
            dev, var = model_var
            if not hasattr(system, dev):
                logger.error('Model <{}> does not exist.'.format(dev))
            else:
                if var not in system.__dict__[dev]._data.keys():
                    logger.error(
                        'Model <{}> does not have parameter <{}>.'.format(
                            dev, var))
                else:
                    c1 = system.__dict__[dev]._descr.get(var, 'no Description')
                    c2 = system.__dict__[dev]._data.get(var)
                    c3 = system.__dict__[dev]._units.get(var, 'no Unit')
                    out.append('{}: {}, default = {:g} {}'.format(
                        '.'.join(model_var), c1, c2, c3))

    if group:
        group_dict = {}
        match = []

        for model in all_models_list:
            g = system.__dict__[model]._group
            if g not in group_dict:
                group_dict[g] = []
            group_dict[g].append(model)

        if group.lower() == 'all':
            match = sorted(list(group_dict.keys()))
        else:
            group = [group]
            # search for ``group`` in all group names and store in ``match``
            for item in group_dict.keys():
                if group[0].lower() in item.lower():
                    match.append(item)

            # if group name not found
            if len(match) == 0:
                out.append('Group <{:s}> not found.'.format(group[0]))

        for item in match:
            group_models = sorted(list(group_dict[item]))
            out.append('<{:s}>'.format(item))
            out.append(', '.join(group_models))
            out.append('')

    if quick_help:
        if quick_help not in all_models_list:
            out.append('Model <{}> does not exist.'.format(quick_help))
        else:
            out.append(system.__dict__[quick_help].doc(export=export))

    if help_option:
        raise NotImplementedError

    if help_config:
        all_config = ['system', 'pflow', 'tds', 'eig']
        if help_config.lower() == 'all':
            help_config = all_config
        else:
            help_config = help_config.split(',')

            # iterate over a copy so removals do not skip items
            for item in list(help_config):
                if item.lower() not in all_config:
                    logger.warning('Config <{}> does not exist.'.format(item))
                    help_config.remove(item)

        if len(help_config) > 0:
            for item in help_config:
                if item == 'system':
                    out.append(system.config.doc(export=export))
                else:
                    out.append(system.__dict__[item.lower()].config.doc(
                        export=export))

    print('\n'.join(out))  # NOQA

    return True
0.000171
def plot(self):
    """
    Graphical summary of pointwise pareto-k importance-sampling indices

    Pareto-k tail indices are plotted (on the y axis) for each observation
    unit (on the x axis)
    """
    seaborn.pointplot(
        y=self.pointwise.pareto_k,
        x=self.pointwise.index,
        join=False)
0.026393
def t_INITIAL_SHARP(self, t):
    r'\#'
    if self.find_column(t) == 1:
        t.lexer.begin('preproc')
    else:
        self.t_INITIAL_preproc_error(t)
0.011364
def restart(self):
    """
    Tells the HAProxy control object to restart the process.

    If it's been fewer than `restart_interval` seconds since the previous
    restart, it will wait until the interval has passed. This staves off
    situations where the process is constantly restarting, as it is
    possible to drop packets for a short interval while doing so.
    """
    delay = (self.last_restart - time.time()) + self.restart_interval
    if delay > 0:
        time.sleep(delay)
    self.control.restart()
    self.last_restart = time.time()
    self.restart_required = False
0.003106
def parse_bdstoken(content):
    '''Parse the bdstoken and related info from the page.

    This information lives in a <script> block near the bottom of the
    page, and only appears on pages requested after authorization. For
    compatibility, the cssselect module is no longer used for parsing
    here.

    @return returns the bdstoken
    '''
    bdstoken = ''
    bds_re = re.compile(r'"bdstoken"\s*:\s*"([^"]+)"', re.IGNORECASE)
    bds_match = bds_re.search(content)
    if bds_match:
        bdstoken = bds_match.group(1)
    return bdstoken
0.010724
def gff3_verifier(entries, line=None):
    """Raises error if invalid GFF3 format detected

    Args:
        entries (list): A list of GFF3Entry instances

        line (int): Line number of first entry

    Raises:
        FormatError: Error when GFF3 format incorrect with
            descriptive message
    """
    regex = r'^[a-zA-Z0-9.:^*$@!+_?-|]+\t.+\t.+\t\d+\t\d+\t' \
            + r'\d*\.?\d*\t[+-.]\t[.0-2]\t.+{0}$'.format(os.linesep)
    delimiter = r'\t'
    for entry in entries:
        try:
            entry_verifier([entry.write()], regex, delimiter)
        except FormatError as error:
            # Format info on what entry error came from
            if line:
                intro = 'Line {0}'.format(str(line))
            elif error.part == 0:
                intro = 'Entry with source {0}'.format(entry.source)
            else:
                intro = 'Entry with Sequence ID {0}'.format(entry.seqid)

            # Generate error
            if error.part == 0:
                msg = '{0} has no Sequence ID'.format(intro)
            elif error.part == 1:
                msg = '{0} has no source'.format(intro)
            elif error.part == 2:
                msg = '{0} has non-numerical characters in type'.format(intro)
            elif error.part == 3:
                msg = '{0} has non-numerical characters in ' \
                      'start position'.format(intro)
            elif error.part == 4:
                msg = '{0} has non-numerical characters in ' \
                      'end position'.format(intro)
            elif error.part == 5:
                msg = '{0} has non-numerical characters in score'.format(intro)
            elif error.part == 6:
                msg = '{0} strand not in [+-.]'.format(intro)
            elif error.part == 7:
                msg = '{0} phase not in [.0-2]'.format(intro)
            elif error.part == 8:
                msg = '{0} has no attributes'.format(intro)
            else:
                msg = 'Unknown Error: Likely a Bug'
            raise FormatError(message=msg)

        if line:
            line += 1
0.000477
def traceback_plot(self, fsize=(6, 4)):
    """
    Plots a path of the possible last 4 states.

    Parameters
    ----------
    fsize : Plot size for matplotlib.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from sk_dsp_comm.fec_conv import fec_conv
    >>> from sk_dsp_comm import digitalcom as dc
    >>> import numpy as np
    >>> cc = fec_conv()
    >>> x = np.random.randint(0, 2, 100)
    >>> state = '00'
    >>> y, state = cc.conv_encoder(x, state)
    >>> # Add channel noise to bits translated to +1/-1
    >>> yn = dc.cpx_AWGN(2 * y - 1, 5, 1)  # SNR = 5 dB
    >>> # Translate noisy +1/-1 bits to soft values on [0,7]
    >>> yn = (yn.real + 1) / 2 * 7
    >>> z = cc.viterbi_decoder(yn)
    >>> cc.traceback_plot()
    >>> plt.show()
    """
    traceback_states = self.paths.traceback_states
    plt.figure(figsize=fsize)
    plt.axis([-self.decision_depth + 1, 0,
              -(self.Nstates - 1) - 0.5, 0.5])
    M, N = traceback_states.shape
    traceback_states = -traceback_states[:, ::-1]

    plt.plot(range(-(N - 1), 0 + 1), traceback_states.T)
    plt.xlabel('Traceback Symbol Periods')
    plt.ylabel('State Index $0$ to -$2^{(K-1)}$')
    plt.title('Survivor Paths Traced Back From All %d States' % self.Nstates)
    plt.grid()
0.007013
def update(self, columnIndex, vector):
    """ Wraps setRowFromDense() """
    return super(_SparseMatrixCorticalColumnAdapter, self).setRowFromDense(
        columnIndex, vector
    )
0.005525
def search_news(q, start=1, count=10, wait=10, asynchronous=False,
                cached=False):
    """ Returns a Yahoo news query formatted as a YahooSearch list object.
    """
    service = YAHOO_NEWS
    return YahooSearch(q, start, count, service, None, wait, asynchronous,
                       cached)
0.01773
def memoized_parse_block(code):
    """Memoized version of parse_block."""
    success, result = parse_block_memo.get(code, (None, None))
    if success is None:
        try:
            parsed = COMPILER.parse_block(code)
        except Exception as err:
            success, result = False, err
        else:
            success, result = True, parsed
        parse_block_memo[code] = (success, result)
    if success:
        return result
    else:
        raise result
0.002114
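The same cache-success-or-failure pattern can be sketched independently of the compiler internals above; the parse callable here is a hypothetical stand-in:

    parse_memo = {}

    def memoized_parse(parse, code):
        # Cache successful results and raised exceptions alike, keyed by source.
        success, result = parse_memo.get(code, (None, None))
        if success is None:
            try:
                success, result = True, parse(code)
            except Exception as err:
                success, result = False, err
            parse_memo[code] = (success, result)
        if success:
            return result
        raise result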
def to_basestring(value):
    """Converts a string argument to a subclass of basestring.

    In python2, byte and unicode strings are mostly interchangeable,
    so functions that deal with a user-supplied argument in combination
    with ascii string constants can use either and should return the type
    the user supplied.  In python3, the two types are not interchangeable,
    so this method is needed to convert byte strings to unicode.
    """
    if isinstance(value, _BASESTRING_TYPES):
        return value
    assert isinstance(value, bytes)
    return value.decode("utf-8")
0.001704
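In practice the conversion above only touches byte strings; text values pass through untouched:

    to_basestring("already text")    # returned unchanged
    to_basestring(b"caf\xc3\xa9")    # decoded as UTF-8 to "café"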
def admin_tools_render_menu_item(context, item, index=None):
    """
    Template tag that renders a given menu item, it takes a ``MenuItem``
    instance as unique parameter.
    """
    item.init_with_context(context)

    context.update({
        'template': item.template,
        'item': item,
        'index': index,
        'selected': item.is_selected(context['request']),
        'admin_url': reverse('%s:index' % get_admin_site_name(context)),
    })
    return context
0.002088
def redo(self):
    """
    Performs the top group on the redo stack, if present.

    Creates an undo group with the same name.
    Raises RuntimeError if called while undoing.
    """
    if self._undoing or self._redoing:
        raise RuntimeError
    if not self._redo:
        return
    group = self._redo.pop()
    self._redoing = True
    self.begin_grouping()
    group.perform()
    self.set_action_name(group.name)
    self.end_grouping()
    self._redoing = False
    self.notify()
0.003591
def pop_marker(self, reset):
    """ Pop a marker off of the marker stack. If reset is True then the
    iterator will be returned to the state it was in before the
    corresponding call to push_marker().
    """
    marker = self.markers.pop()

    if reset:
        # Make the values available to be read again
        marker.extend(self.look_ahead)
        self.look_ahead = marker
    elif self.markers:
        # Otherwise, reassign the values to the top marker
        self.markers[-1].extend(marker)
    else:
        # If there are no more markers in the stack then discard the values
        pass
0.004484
def cmd_gasheli(self, args):
    '''gas heli commands'''
    usage = "Usage: gasheli <start|stop|set>"
    if len(args) < 1:
        print(usage)
        return
    if args[0] == "start":
        self.start_motor()
    elif args[0] == "stop":
        self.stop_motor()
    elif args[0] == "set":
        self.gasheli_settings.command(args[1:])
    else:
        print(usage)
0.004695
def fol_fc_ask(KB, alpha):
    """Inefficient forward chaining for first-order logic. [Fig. 9.3]
    KB is a FolKB and alpha must be an atomic sentence."""
    while True:
        new = {}
        for r in KB.clauses:
            ps, q = parse_definite_clause(standardize_variables(r))
            raise NotImplementedError
0.003096
def list_versions(self, layer_id):
    """
    Filterable list of versions of a layer, always ordered newest to oldest.

    If the version's source supports revisions, you can get a specific
    revision using ``.filter(data__source__revision=value)``. Specific
    values depend on the source type.

    Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to
    filter using ``<`` or ``>=`` operators respectively.
    """
    target_url = self.client.get_url(
        'VERSION', 'GET', 'multi', {'layer_id': layer_id})
    return base.Query(self, target_url,
                      valid_filter_attributes=('data',),
                      valid_sort_attributes=())
0.012103
def handle_version(self, message_header, message):
    """
    This method will handle the Version message and
    will send a VerAck message when it receives the
    Version message.

    :param message_header: The Version message header
    :param message: The Version message
    """
    log.debug("handle version")
    verack = VerAck()
    log.debug("send VerAck")
    self.send_message(verack)
    self.verack = True

    start_block_height = sorted(self.blocks.keys())[0]
    if start_block_height < 1:
        start_block_height = 1

    # ask for all blocks
    block_hashes = []
    for height in sorted(self.blocks.keys()):
        block_hashes.append(int(self.blocks[height], 16))

    start_block_height = sorted(self.blocks.keys())[0]
    end_block_height = sorted(self.blocks.keys())[-1]

    log.debug("send getdata for %s-%s (%064x-%064x)" %
              (start_block_height, end_block_height,
               block_hashes[0], block_hashes[-1]))

    # send off the getdata
    getdata = GetData()
    block_inv_vec = []
    for block_hash in block_hashes:
        block_inv = Inventory()
        block_inv.inv_type = INVENTORY_TYPE["MSG_BLOCK"]
        block_inv.inv_hash = block_hash
        block_inv_vec.append(block_inv)

    getdata.inventory = block_inv_vec
    self.send_message(getdata)
0.004231
def unionfs(rw='rw', ro=None, union='union'):
    """
    Decorator for the UnionFS feature.

    This configures a unionfs for projects. The given base_dir and/or
    image_dir are layered as follows:
     image_dir=RW:base_dir=RO
    All writes go to the image_dir, while base_dir delivers the (read-only)
    versions of the rest of the filesystem.

    The unified version will be provided in the project's builddir.
    Unmounting is done as soon as the function completes.

    Args:
        rw: writeable storage area for the unified fuse filesystem.
        ro: read-only storage area for the unified fuse filesystem.
        union: mountpoint of the unified fuse filesystem.
    """
    from functools import wraps

    def wrap_in_union_fs(func):
        """
        Function that wraps a given function inside the file system.

        Args:
            func: The function that needs to be wrapped inside the union fs.
        Return:
            The file system with the function wrapped inside.
        """

        @wraps(func)
        def wrap_in_union_fs_func(project, *args, **kwargs):
            """
            Wrap the func in the UnionFS mount stack.

            We make sure that the mount points all exist and stack up the
            directories for the unionfs. All directories outside of the
            default build environment are tracked for deletion.
            """
            container = project.container
            if container is None or in_container():
                return func(project, *args, **kwargs)

            build_dir = local.path(project.builddir)
            LOG.debug("UnionFS - Project builddir: %s", project.builddir)
            if __unionfs_is_active(root=build_dir):
                LOG.debug(
                    "UnionFS already active in %s, nesting not supported.",
                    build_dir)
                return func(project, *args, **kwargs)

            ro_dir = local.path(container.local)
            rw_dir = build_dir / rw
            un_dir = build_dir / union
            LOG.debug("UnionFS - RW: %s", rw_dir)

            unionfs_cmd = __unionfs_set_up(ro_dir, rw_dir, un_dir)
            project_builddir_bak = project.builddir
            project.builddir = un_dir

            proc = unionfs_cmd.popen()
            while (not __unionfs_is_active(root=un_dir)) and \
                    (proc.poll() is None):
                pass

            ret = None
            if proc.poll() is None:
                try:
                    with local.cwd(un_dir):
                        ret = func(project, *args, **kwargs)
                finally:
                    project.builddir = project_builddir_bak

                    from signal import SIGINT
                    is_running = proc.poll() is None
                    while __unionfs_is_active(root=un_dir) and is_running:
                        try:
                            proc.send_signal(SIGINT)
                            proc.wait(timeout=3)
                        except subprocess.TimeoutExpired:
                            proc.kill()
                            is_running = False
                    LOG.debug("Unionfs shut down.")

            if __unionfs_is_active(root=un_dir):
                raise UnmountError()

            return ret

        return wrap_in_union_fs_func

    return wrap_in_union_fs
0.000297
def poke(self, context):
    """
    Pokes for a mail attachment on the mail server.

    :param context: The context that is being provided when poking.
    :type context: dict
    :return: True if attachment with the given name is present and
        False if not.
    :rtype: bool
    """
    self.log.info('Poking for %s', self.attachment_name)

    with ImapHook(imap_conn_id=self.conn_id) as imap_hook:
        return imap_hook.has_mail_attachment(
            name=self.attachment_name,
            mail_folder=self.mail_folder,
            check_regex=self.check_regex
        )
0.004732
def write(self, destination, filename, content):
    """ Write a file at the specific destination with the content.

        Args:
            destination (string): the destination location
            filename (string): the filename that will be written
            content (string): the content of the filename
    """
    if not os.path.exists(destination):
        try:
            os.makedirs(destination)
        except OSError:
            # The directory may have been created concurrently.
            pass

    filepath = "%s/%s" % (destination, filename)
    f = open(filepath, "w+")
    f.write(content)
    f.close()
0.004478
def filter(self, filters=None, keep=True, inplace=False, **kwargs):
    """Return a filtered IamDataFrame (i.e., a subset of current data)

    Parameters
    ----------
    keep: bool, default True
        keep all scenarios satisfying the filters (if True) or the inverse
    inplace: bool, default False
        if True, do operation inplace and return None
    filters by kwargs or dict (deprecated):
        The following columns are available for filtering:
         - metadata columns: filter by category assignment
         - 'model', 'scenario', 'region', 'variable', 'unit':
           string or list of strings, where `*` can be used as a wildcard
         - 'level': the maximum "depth" of IAM variables (number of '|')
           (excluding the strings given in the 'variable' argument)
         - 'year': takes an integer, a list of integers or a range
           note that the last year of a range is not included,
           so `range(2010, 2015)` is interpreted as `[2010, ..., 2014]`
         - arguments for filtering by `datetime.datetime`
           ('month', 'hour', 'time')
         - 'regexp=True' disables pseudo-regexp syntax in `pattern_match()`
    """
    if filters is not None:
        msg = '`filters` keyword argument in `filter()` is deprecated ' + \
              'and will be removed in the next release'
        warnings.warn(msg)
        kwargs.update(filters)

    _keep = self._apply_filters(**kwargs)
    _keep = _keep if keep else ~_keep
    ret = copy.deepcopy(self) if not inplace else self
    ret.data = ret.data[_keep]

    idx = _make_index(ret.data)
    if len(idx) == 0:
        logger().warning('Filtered IamDataFrame is empty!')
    ret.meta = ret.meta.loc[idx]
    if not inplace:
        return ret
0.001056
def flags(self):
    """Return set of flags."""
    return set((name.lower() for name in sorted(TIFF.FILE_FLAGS)
                if getattr(self, 'is_' + name)))
0.011561
def merge(cls, trees):
    """
    Merge a collection of AttrTree objects.
    """
    first = trees[0]
    for tree in trees:
        first.update(tree)
    return first
0.010101
def merge_maps(m, base):
    """
    Merge in undefined map entries from given map.

    @param m: Map to be merged into.
    @type m: lems.util.Map

    @param base: Map to merge from (only entries missing in m are copied).
    @type base: lems.util.Map
    """
    for k in base.keys():
        if k not in m:
            m[k] = base[k]
0.012658
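merge_maps fills in defaults without overwriting values already set; plain dicts stand in for lems.util.Map here for illustration:

    defaults = {'timeout': 30, 'retries': 3}
    settings = {'timeout': 10}
    merge_maps(settings, defaults)
    # settings == {'timeout': 10, 'retries': 3}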
async def _workaround_1695335(self, delta, old, new, model):
    """
    This is a (hacky) temporary work around for a bug in Juju where
    the instance status and agent version fields don't get updated
    properly by the AllWatcher.

    Deltas never contain a value for `data['agent-status']['version']`,
    and once the `instance-status` reaches `pending`, we no longer get
    any updates for it (the deltas come in, but the `instance-status`
    data is always the same after that).

    To work around this, whenever a delta comes in for this machine, we
    query FullStatus and use the data from there if and only if it's
    newer. Luckily, the timestamps on the `since` field do seem to be
    accurate.

    See https://bugs.launchpad.net/juju/+bug/1695335
    """
    if delta.data.get('synthetic', False):
        # prevent infinite loops re-processing already processed deltas
        return

    full_status = await utils.run_with_interrupt(model.get_status(),
                                                 model._watch_stopping,
                                                 loop=model.loop)
    if model._watch_stopping.is_set():
        return

    if self.id not in full_status.machines:
        return

    if not full_status.machines[self.id]['instance-status']['since']:
        return

    machine = full_status.machines[self.id]
    change_log = []
    key_map = {
        'status': 'current',
        'info': 'message',
        'since': 'since',
    }

    # handle agent version specially, because it's never set in
    # deltas, and we don't want even a newer delta to clear it
    agent_version = machine['agent-status']['version']
    if agent_version:
        delta.data['agent-status']['version'] = agent_version
        change_log.append(('agent-version', '', agent_version))

    # only update (other) delta fields if status data is newer
    status_since = pyrfc3339.parse(machine['instance-status']['since'])
    delta_since = pyrfc3339.parse(delta.data['instance-status']['since'])
    if status_since > delta_since:
        for status_key in ('status', 'info', 'since'):
            delta_key = key_map[status_key]
            status_value = machine['instance-status'][status_key]
            delta_value = delta.data['instance-status'][delta_key]
            change_log.append((delta_key, delta_value, status_value))
            delta.data['instance-status'][delta_key] = status_value

    if change_log:
        log.debug('Overriding machine delta with FullStatus data')
        for log_item in change_log:
            log.debug('  {}: {} -> {}'.format(*log_item))
        delta.data['synthetic'] = True
        old_obj, new_obj = self.model.state.apply_delta(delta)
        await model._notify_observers(delta, old_obj, new_obj)
0.000667
def add_advisor(self, name, ids=None, degree_type=None, record=None,
                curated=False):
    """Add an advisor.

    Args:
        :param name: full name of the advisor.
        :type name: string

        :param ids: list with the IDs of the advisor.
        :type ids: list

        :param degree_type: one of the allowed types of degree the advisor
            helped with.
        :type degree_type: string

        :param record: URI for the advisor.
        :type record: string

        :param curated: if the advisor relation has been curated i.e. has
            been verified.
        :type curated: boolean
    """
    new_advisor = {}
    new_advisor['name'] = normalize_name(name)
    if ids:
        new_advisor['ids'] = force_list(ids)
    if degree_type:
        new_advisor['degree_type'] = degree_type
    if record:
        new_advisor['record'] = record
    new_advisor['curated_relation'] = curated

    self._append_to('advisors', new_advisor)
0.004864
def encrypt_report(self, device_id, root, data, **kwargs):
    """Encrypt a buffer of report data on behalf of a device.

    Args:
        device_id (int): The id of the device that we should encrypt for
        root (int): The root key type that should be used to generate
            the report
        data (bytearray): The data that we should encrypt.
        **kwargs: There are additional specific keyword args that are
            required depending on the root key used. Typically, you must
            specify
            - report_id (int): The report id
            - sent_timestamp (int): The sent timestamp of the report

            These two bits of information are used to construct the per
            report signing and encryption key from the specific root key
            type.

    Returns:
        dict: The encrypted data and any associated metadata about the
            data. The data itself must always be a bytearray stored under
            the 'data' key, however additional keys may be present
            depending on the encryption method used.

    Raises:
        NotFoundError: If the auth provider is not able to encrypt
            the data.
    """
    for _priority, provider in self.providers:
        try:
            return provider.encrypt_report(device_id, root, data, **kwargs)
        except NotFoundError:
            pass

    raise NotFoundError("encrypt_report method is not implemented in any "
                        "sub_providers")
0.006609
def delete_service(self, service_id):
    """Deletes a service from the loadbal_id.

    :param int service_id: The id of the service to delete
    """
    svc = self.client['Network_Application_Delivery_Controller_'
                      'LoadBalancer_Service']
    return svc.deleteObject(id=service_id)
0.006042
def _client_connection(self, conn, addr):
    '''
    Handle the connection with one client.
    '''
    log.debug('Established connection with %s:%d', addr[0], addr[1])
    conn.settimeout(self.socket_timeout)
    try:
        while self.__up:
            msg = conn.recv(self.buffer_size)
            if not msg:
                # log.debug('Received empty message from %s', addr)
                # disabled ^ as it was too noisy
                continue
            log.debug('[%s] Received %s from %s. Adding in the queue',
                      time.time(), msg, addr)
            self.buffer.put((msg, '{}:{}'.format(addr[0], addr[1])))
    except socket.timeout:
        if not self.__up:
            return
        log.debug('Connection %s:%d timed out', addr[0], addr[1])
        raise ListenerException('Connection %s:%d timed out' % addr)
    finally:
        log.debug('Closing connection with %s', addr)
        conn.close()
0.002988
def run(self):
    """ Perform build_cmake before doing the 'normal' stuff """
    for extension in self.extensions:
        if extension.name == "bpy":
            self.build_cmake(extension)

    super().run()
0.008
def find():
    """Find the configuration file if any."""
    names = ('archan.yml', 'archan.yaml', '.archan.yml', '.archan.yaml')
    current_dir = os.getcwd()
    configconfig_file = os.path.join(current_dir, '.configconfig')
    default_config_dir = os.path.join(current_dir, 'config')
    if os.path.isfile(configconfig_file):
        logger.debug('Reading %s to get config folder path',
                     configconfig_file)
        with open(configconfig_file) as stream:
            config_dir = os.path.join(current_dir, stream.read().strip())
    elif os.path.isdir(default_config_dir):
        config_dir = default_config_dir
    else:
        config_dir = current_dir
    logger.debug('Config folder = %s', config_dir)
    for name in names:
        config_file = os.path.join(config_dir, name)
        logger.debug('Searching for config file at %s', config_file)
        if os.path.isfile(config_file):
            logger.debug('Found %s', config_file)
            return config_file
    logger.debug('No config file found')
    return None
0.001745
def chat_react(self, msg_id, emoji='smile', **kwargs):
    """Sets or unsets a reaction (emoji) on the chat message."""
    return self.__call_api_post('chat.react', messageId=msg_id,
                                emoji=emoji, kwargs=kwargs)
0.014851
def apply_option(self, cmd, option, active=True):
    """Apply a command-line option."""
    return re.sub(r'{{{}\:(?P<option>[^}}]*)}}'.format(option),
                  r'\g<option>' if active else '', cmd)
0.013699
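The template syntax this expects looks like {option:text}: the text is kept when the option is active and dropped otherwise. A hypothetical call on an instance obj:

    obj.apply_option('run {verbose:-v} task', 'verbose')                # 'run -v task'
    obj.apply_option('run {verbose:-v} task', 'verbose', active=False)  # 'run  task'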
def get_attachment_content(self, ticket_id, attachment_id):
    """ Get content of attachment without headers.

    This function is necessary to use for binary attachment,
    as it can contain ``\\n`` chars, which would disrupt parsing
    of message if :py:meth:`~Rt.get_attachment` is used.

    Format of message::

        RT/3.8.7 200 Ok\n\nStart of the content...End of the content\n\n\n

    :param ticket_id: ID of ticket
    :param attachment_id: ID of attachment

    Returns: Bytes with content of attachment or None if ticket or
             attachment does not exist.
    """
    msg = self.__request(
        'ticket/{}/attachments/{}/content'.format(
            str(ticket_id), str(attachment_id)),
        text_response=False)
    lines = msg.split(b'\n', 3)
    if (len(lines) == 4) and \
            (self.RE_PATTERNS['invalid_attachment_pattern_bytes'].match(lines[2]) or
             self.RE_PATTERNS['does_not_exist_pattern_bytes'].match(lines[2])):
        return None
    return msg[msg.find(b'\n') + 2:-3]
0.00273
def upload(sess_id_or_alias, files):
    """
    Upload files to user's home folder.

    \b
    SESSID: Session ID or its alias given when creating the session.
    FILES: Path to upload.
    """
    if len(files) < 1:
        return
    with Session() as session:
        try:
            print_wait('Uploading files...')
            kernel = session.Kernel(sess_id_or_alias)
            kernel.upload(files, show_progress=True)
            print_done('Uploaded.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
0.001825
def generator_name(cls):
    """ :meth:`.WHashGeneratorProto.generator_name` implementation
    """
    if cls.__generator_name__ is None:
        raise ValueError(
            '"__generator_name__" should be overridden in a derived class')
    if isinstance(cls.__generator_name__, str) is False:
        raise TypeError('"__generator_name__" should be a str instance')
    return cls.__generator_name__.upper()
0.026525
def __pack_message(operation, data):
    """Takes message data and adds a message header based on the operation.

    Returns the resultant message string.
    """
    request_id = _randint()
    message = struct.pack("<i", 16 + len(data))  # messageLength
    message += struct.pack("<i", request_id)     # requestID
    message += _ZERO_32                          # responseTo
    message += struct.pack("<i", operation)      # opCode
    return (request_id, message + data)
0.002463
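The four little-endian int32 fields form the standard 16-byte MongoDB wire-protocol header. A self-contained sketch of the same layout, using a fixed request id and the OP_QUERY opcode (2004) as assumed example values:

    import struct

    data = b"example body"
    request_id = 1234  # stand-in for _randint()
    header = struct.pack("<i", 16 + len(data))  # messageLength: header + body
    header += struct.pack("<i", request_id)     # requestID
    header += struct.pack("<i", 0)              # responseTo (zero for requests)
    header += struct.pack("<i", 2004)           # opCode: OP_QUERY
    message = header + data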
def _set_client(self):
    """Set client property if not set."""
    if self._client is None:
        if mongo_proxy:
            self._client = mongo_proxy.MongoProxy(
                pymongo.MongoClient(self.connection_string),
                logger=LOG)
        else:
            LOG.warning("MongoDBProxy not imported. AutoReconnect "
                        "is not enabled.")
            self._client = pymongo.MongoClient(self.connection_string)
        LOG.debug("Created new connection to MongoDB: %s",
                  self.safe_connection_string)
0.002924
def file(self, owner=None, **kwargs):
    """ Create the File TI object.

    Args:
        owner: Owner of the File indicator.
        **kwargs: Additional keyword arguments passed through to File.

    Return:
        A File TI object.
    """
    return File(self.tcex, owner=owner, **kwargs)
0.008929
def prepare_worker(self):
    """
    Prepare the worker, ready to be launched: prepare options, create a
    log handler if none, and manage dry_run options
    """
    worker_options = self.prepare_worker_options()
    self.worker = self.options.worker_class(**worker_options)
    if self.update_title:
        self.worker._add_update_status_callback(self.update_proc_title)
        self.update_proc_title()

    if not self.worker.logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            ' '.join(['[%(process)d]',
                      # '%(asctime)s,%(msecs).03d',
                      '%(asctime)s',
                      '(%(name)s)',
                      '%(levelname)-8s',
                      '%(message)s',
                      ])  # , '%y.%m.%d:%H.%M.%S'
        ))
        self.worker.logger.addHandler(handler)

    if self.options.dry_run:
        self.worker.end_forced = True
0.002344
def covariance_between_points(self, kern, X, X1, X2):
    """
    Computes the posterior covariance between points.

    :param kern: GP kernel
    :param X: current input observations
    :param X1: some input observations
    :param X2: other input observations
    """
    # ndim == 3 is a model for missing data
    if self.woodbury_chol.ndim != 2:
        raise RuntimeError(
            "This method does not support posterior for missing data models")

    Kx1 = kern.K(X, X1)
    Kx2 = kern.K(X, X2)
    K12 = kern.K(X1, X2)

    tmp1 = dtrtrs(self.woodbury_chol, Kx1)[0]
    tmp2 = dtrtrs(self.woodbury_chol, Kx2)[0]
    var = K12 - tmp1.T.dot(tmp2)

    return var
0.004115
def base_url(klass, space_id, parent_resource_id, resource_url='entries',
             resource_id=None, environment_id=None):
    """
    Returns the URI for the snapshot.
    """
    return "spaces/{0}{1}/{2}/{3}/snapshots/{4}".format(
        space_id,
        '/environments/{0}'.format(environment_id)
        if environment_id is not None else '',
        resource_url,
        parent_resource_id,
        resource_id if resource_id is not None else ''
    )
0.008264
def cli(env, package_keyname, location, preset, verify, billing,
        complex_type, quantity, extras, order_items):
    """Place or verify an order.

    This CLI command is used for placing/verifying an order of the specified
    package in the given location (denoted by a datacenter's long name).
    Orders made via the CLI can then be converted to be made programmatically
    by calling SoftLayer.OrderingManager.place_order() with the same keynames.

    Packages for ordering can be retrieved from `slcli order package-list`
    Presets for ordering can be retrieved from `slcli order preset-list`
    (not all packages have presets)

    Items can be retrieved from `slcli order item-list`. In order to find
    required items for the order, use `slcli order category-list`, and then
    provide the --category option for each category code in
    `slcli order item-list`.

    Example::

        # Order an hourly VSI with 4 CPU, 16 GB RAM, 100 GB SAN disk,
        # Ubuntu 16.04, and 1 Gbps public & private uplink in dal13
        slcli order place --billing hourly CLOUD_SERVER DALLAS13 \\
            GUEST_CORES_4 \\
            RAM_16_GB \\
            REBOOT_REMOTE_CONSOLE \\
            1_GBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS \\
            BANDWIDTH_0_GB_2 \\
            1_IP_ADDRESS \\
            GUEST_DISK_100_GB_SAN \\
            OS_UBUNTU_16_04_LTS_XENIAL_XERUS_MINIMAL_64_BIT_FOR_VSI \\
            MONITORING_HOST_PING \\
            NOTIFICATION_EMAIL_AND_TICKET \\
            AUTOMATED_NOTIFICATION \\
            UNLIMITED_SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT \\
            NESSUS_VULNERABILITY_ASSESSMENT_REPORTING \\
            --extras '{"virtualGuests": [{"hostname": "test", "domain": "softlayer.com"}]}' \\
            --complex-type SoftLayer_Container_Product_Order_Virtual_Guest

    """
    manager = ordering.OrderingManager(env.client)

    if extras:
        try:
            extras = json.loads(extras)
        except ValueError as err:
            raise exceptions.CLIAbort(
                "There was an error when parsing the --extras value: "
                "{}".format(err))

    args = (package_keyname, location, order_items)
    kwargs = {'preset_keyname': preset,
              'extras': extras,
              'quantity': quantity,
              'complex_type': complex_type,
              'hourly': bool(billing == 'hourly')}

    if verify:
        table = formatting.Table(COLUMNS)
        order_to_place = manager.verify_order(*args, **kwargs)
        for price in order_to_place['orderContainers'][0]['prices']:
            cost_key = 'hourlyRecurringFee' if billing == 'hourly' else 'recurringFee'
            table.add_row([
                price['item']['keyName'],
                price['item']['description'],
                price[cost_key] if cost_key in price else formatting.blank()
            ])
    else:
        if not (env.skip_confirmations or formatting.confirm(
                "This action will incur charges on your account. "
                "Continue?")):
            raise exceptions.CLIAbort("Aborting order.")

        order = manager.place_order(*args, **kwargs)

        table = formatting.KeyValueTable(['name', 'value'])
        table.align['name'] = 'r'
        table.align['value'] = 'l'
        table.add_row(['id', order['orderId']])
        table.add_row(['created', order['orderDate']])
        table.add_row(['status', order['placedOrder']['status']])
    env.fout(table)
0.002338
def get_fs(path):
    """Find the file system implementation for this path."""
    scheme = ''
    if '://' in path:
        scheme = path.partition('://')[0]
    for schemes, fs_class in FILE_EXTENSIONS:
        if scheme in schemes:
            return fs_class
    return FileSystem
0.003497
def fromProfileName(cls, name):
    """Return a `SessionAPI` from a given configuration profile name.

    :see: `ProfileStore`.
    """
    with profiles.ProfileStore.open() as config:
        return cls.fromProfile(config.load(name))
0.007843
def ip_rtm_config_route_static_bfd_bfd_static_route_bfd_static_route_src(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip",
                       xmlns="urn:brocade.com:mgmt:brocade-common-def")
    rtm_config = ET.SubElement(ip, "rtm-config",
                               xmlns="urn:brocade.com:mgmt:brocade-rtm")
    route = ET.SubElement(rtm_config, "route")
    static = ET.SubElement(route, "static")
    bfd = ET.SubElement(static, "bfd")
    bfd_static_route = ET.SubElement(bfd, "bfd-static-route")
    bfd_static_route_dest_key = ET.SubElement(bfd_static_route,
                                              "bfd-static-route-dest")
    bfd_static_route_dest_key.text = kwargs.pop('bfd_static_route_dest')
    bfd_static_route_src = ET.SubElement(bfd_static_route,
                                         "bfd-static-route-src")
    bfd_static_route_src.text = kwargs.pop('bfd_static_route_src')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.007128
def connected_channel(self):
    """ Returns the voice channel the player is connected to. """
    if not self.channel_id:
        return None

    return self._lavalink.bot.get_channel(int(self.channel_id))
0.008772
def _get_view_infos(self, trimmed=False):
    """query the sherlock-catalogues database view metadata
    """
    self.log.debug('starting the ``_get_view_infos`` method')

    sqlQuery = u"""
        SELECT v.*, t.description as "master table"
        FROM crossmatch_catalogues.tcs_helper_catalogue_views_info as v,
             crossmatch_catalogues.tcs_helper_catalogue_tables_info AS t
        WHERE v.legacy_view = 0
          AND v.view_name not like "legacy%%"
          AND t.id = v.table_id
        ORDER BY number_of_rows desc
    """ % locals()
    viewInfo = readquery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn,
        quiet=False
    )

    if trimmed:
        cleanTable = []
        for r in viewInfo:
            orow = collections.OrderedDict(sorted({}.items()))
            for c in self.basicColumns:
                if c in r:
                    orow[c] = r[c]
            cleanTable.append(orow)
        viewInfo = cleanTable

    self.log.debug('completed the ``_get_view_infos`` method')
    return viewInfo
0.002639
def start(self, any_zone):
    """Start the event listener listening on the local machine at port
    1400 (default)

    Make sure that your firewall allows connections to this port

    Args:
        any_zone (SoCo): Any Sonos device on the network. It does not
            matter which device. It is used only to find a local IP
            address reachable by the Sonos net.

    Note:
        The port on which the event listener listens is configurable.
        See `config.EVENT_LISTENER_PORT`
    """
    # Find our local network IP address which is accessible to the
    # Sonos net, see http://stackoverflow.com/q/166506
    with self._start_lock:
        if not self.is_running:
            # Use configured IP address if there is one, else detect
            # automatically.
            if config.EVENT_LISTENER_IP:
                ip_address = config.EVENT_LISTENER_IP
            else:
                temp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                temp_sock.connect((any_zone.ip_address,
                                   config.EVENT_LISTENER_PORT))
                ip_address = temp_sock.getsockname()[0]
                temp_sock.close()
            # Start the event listener server in a separate thread.
            self.address = (ip_address, config.EVENT_LISTENER_PORT)
            self._listener_thread = EventServerThread(self.address)
            self._listener_thread.daemon = True
            self._listener_thread.start()
            self.is_running = True
            log.info("Event listener started")
0.001149
def iter_schemas(self, schema: Schema) -> Iterable[Tuple[str, Any]]:
    """
    Build zero or more JSON schemas for a marshmallow schema.

    Generates: name, schema pairs.
    """
    if not schema:
        return

    yield self.to_tuple(schema)

    for name, field in self.iter_fields(schema):
        if isinstance(field, Nested):
            yield self.to_tuple(field.schema)
            yield from self.iter_schemas(field.schema)
        if isinstance(field, List) and isinstance(field.container, Nested):
            yield self.to_tuple(field.container.schema)
            yield from self.iter_schemas(field.container.schema)
0.002886
def keep_episodes(show, keep):
    """ Delete all but the last `keep` episodes in show. """
    deleted = 0
    print('%s Cleaning %s to %s episodes.' % (datestr(), show.title, keep))
    sort = lambda x: x.originallyAvailableAt or x.addedAt
    items = sorted(show.episodes(), key=sort, reverse=True)
    for episode in items[keep:]:
        delete_episode(episode)
        deleted += 1
    return deleted
0.0075
def lazy_load_font(font_size=default_font_size):
    """
    Lazy loading font according to system platform
    """
    if font_size not in _font_cache:
        if _platform.startswith("darwin"):
            font_path = "/Library/Fonts/Arial.ttf"
        elif _platform.startswith("linux"):
            font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf"
        elif _platform.startswith("win32"):
            font_path = "C:\\Windows\\Fonts\\arial.ttf"
        else:
            # avoid an unbound font_path on unrecognized platforms
            raise RuntimeError("Unsupported platform: %s" % _platform)
        _font_cache[font_size] = ImageFont.truetype(font_path, font_size)
    return _font_cache[font_size]
0.003413
def isTemporal(inferenceType):
    """ Returns True if the inference type is 'temporal', i.e. requires a
    temporal memory in the network.
    """
    if InferenceType.__temporalInferenceTypes is None:
        InferenceType.__temporalInferenceTypes = \
            set([InferenceType.TemporalNextStep,
                 InferenceType.TemporalClassification,
                 InferenceType.TemporalAnomaly,
                 InferenceType.TemporalMultiStep,
                 InferenceType.NontemporalMultiStep])

    return inferenceType in InferenceType.__temporalInferenceTypes
0.002959
def sentry_reraise(exc):
    """Re-raise an exception after logging it to Sentry

    Use this for top-level exceptions when you want the user to see the
    traceback.

    Must be called from within an exception handler.
    """
    sentry_exc(exc)
    # this will messily add this "reraise" function to the stack trace
    # but hopefully it's not too bad
    six.reraise(type(exc), exc, sys.exc_info()[2])
0.004938
def _call_command(self, name, *args, **kwargs):
    """
    Add lock management and call parent.
    """
    meth = super(RedisField, self)._call_command
    if self.indexable and name in self.available_modifiers:
        with FieldLock(self):
            try:
                result = meth(name, *args, **kwargs)
            except:
                self._rollback_indexes()
                raise
            else:
                return result
            finally:
                self._reset_indexes_caches()
    else:
        return meth(name, *args, **kwargs)
0.004754
def oauth2decorator_from_clientsecrets(filename, scope,
                                       message=None, cache=None):
    """Creates an OAuth2Decorator populated from a clientsecrets file.

    Args:
        filename: string, File name of client secrets.
        scope: string or list of strings, scope(s) of the credentials
            being requested.
        message: string, A friendly string to display to the user if the
            clientsecrets file is missing or invalid. The message may
            contain HTML and will be presented on the web interface for
            any method that uses the decorator.
        cache: An optional cache service client that implements get() and
            set() methods. See clientsecrets.loadfile() for details.

    Returns: An OAuth2Decorator
    """
    return OAuth2DecoratorFromClientSecrets(filename, scope,
                                            message=message, cache=cache)
0.001042
def all_modules_subpattern():
    u"""
    Builds a pattern for all toplevel names (urllib, http, etc)
    """
    names_dot_attrs = [mod.split(u".") for mod in MAPPING]
    ret = u"( " + u" | ".join(
        [dotted_name % (simple_name % (mod[0]), simple_attr % (mod[1]))
         for mod in names_dot_attrs])
    ret += u" | "
    ret += u" | ".join(
        [simple_name % (mod[0]) for mod in names_dot_attrs
         if mod[1] == u"__init__"]) + u" )"
    return ret
0.008299
def deserialize(self, node: SchemaNode,
                cstruct: Union[str, ColanderNullType]) \
        -> Optional[Pendulum]:
    """
    Deserializes string representation to Python object.
    """
    if not cstruct:
        return colander.null
    try:
        result = coerce_to_pendulum(cstruct,
                                    assume_local=self.use_local_tz)
    except (ValueError, ParserError) as e:
        raise Invalid(node, "Invalid date/time: value={!r}, error="
                            "{!r}".format(cstruct, e))
    return result
0.006339
def setExpanded(self, state):
    """
    Sets whether or not this rollout is in the expanded state.

    :param      state | <bool>
    """
    self._expanded = state
    self._widget.setVisible(state)

    if state:
        ico = projexui.resources.find('img/treeview/triangle_down.png')
    else:
        ico = projexui.resources.find('img/treeview/triangle_right.png')
    self._titleButton.setIcon(QIcon(ico))

    # emit the signals for this widget
    rollout = self.rolloutWidget()
    if not rollout.signalsBlocked():
        index = rollout.widget().layout().indexOf(self)
        rollout.itemCollapsed.emit(index)
        rollout.itemExpanded.emit(index)
0.016477
def pauseProducing(self):
    """
    Pause the reception of messages by canceling all existing consumers.
    This does not disconnect from the server.

    Message reception can be resumed with :meth:`resumeProducing`.

    Returns:
        Deferred: fired when the production is paused.
    """
    if not self._running:
        return
    # Exit the read loop and cancel the consumer on the server.
    self._running = False
    for consumer in self._consumers.values():
        yield consumer.channel.basic_cancel(consumer_tag=consumer.tag)
    _legacy_twisted_log.msg(
        "Paused retrieval of messages for the server queue")
0.004399
def data(self):
    """bytes: value data as a byte string.

    Raises:
        WinRegistryValueError: if the value data cannot be read.
    """
    try:
        return self._pyregf_value.data
    except IOError as exception:
        raise errors.WinRegistryValueError(
            'Unable to read data from value: {0:s} with error: {1!s}'.format(
                self._pyregf_value.name, exception))
0.007692
def create_driver_script(driver, script_create=None):  # noqa: E501
    """Create a new script

    Create a new script  # noqa: E501

    :param driver: The driver to use for the request. ie. github
    :type driver: str
    :param script_create: The data needed to create this script
    :type script_create: dict | bytes

    :rtype: Response
    """
    if connexion.request.is_json:
        script_create = ScriptCreate.from_dict(connexion.request.get_json())  # noqa: E501

    response = errorIfUnauthorized(role='developer')
    if response:
        return response
    else:
        response = ApitaxResponse()

    driver: Driver = LoadedDrivers.getDriver(driver)
    driver.saveDriverScript(script_create.script.name,
                            script_create.script.content)

    return Response(status=200, body=response.getResponseBody())
0.00243
def _record_revisit(self, payload_offset: int):
    '''Record the revisit if possible.'''
    fields = self._response_record.fields

    ref_record_id = self._url_table.get_revisit_id(
        fields['WARC-Target-URI'],
        fields.get('WARC-Payload-Digest', '').upper().replace('SHA1:', '')
    )

    if ref_record_id:
        try:
            self._response_record.block_file.truncate(payload_offset)
        except TypeError:
            self._response_record.block_file.seek(0)
            data = self._response_record.block_file.read(payload_offset)
            self._response_record.block_file.truncate()
            self._response_record.block_file.seek(0)
            self._response_record.block_file.write(data)

        self._recorder.set_length_and_maybe_checksums(
            self._response_record
        )

        fields[WARCRecord.WARC_TYPE] = WARCRecord.REVISIT
        fields['WARC-Refers-To'] = ref_record_id
        fields['WARC-Profile'] = WARCRecord.SAME_PAYLOAD_DIGEST_URI
        fields['WARC-Truncated'] = 'length'
0.001764
def write_bed_with_trackline(bed, out, trackline, add_chr=False):
    """
    Read a bed file and write a copy with a trackline. Here's a simple
    trackline example: 'track type=bed name="cool" description="A cool
    track."'

    Parameters
    ----------
    bed : str
        Input bed file name.
    out : str
        Output bed file name.
    trackline : str
        UCSC trackline.
    add_chr : boolean
        Add 'chr' to the chromosomes in the input file. Necessary for UCSC
        genome browser if not present.
    """
    df = pd.read_table(bed, index_col=None, header=None)
    bt = pbt.BedTool(
        '\n'.join(df.apply(lambda x: '\t'.join(x.astype(str)),
                           axis=1)) + '\n',
        from_string=True)
    if add_chr:
        bt = add_chr_to_contig(bt)
    bt = bt.saveas(out, trackline=trackline)
0.004662
def evaluate_block(self, comments):
    """Evaluate block comments."""
    if self.jsdocs:
        m1 = RE_JSDOC.match(comments)
        if m1:
            lines = []
            for line in m1.group(1).splitlines(True):
                l = line.lstrip()
                lines.append(l[1:] if l.startswith('*') else l)
            self.jsdoc_comments.append(
                [''.join(lines), self.line_num, self.current_encoding])
        elif self.blocks:
            self.block_comments.append(
                [comments[2:-2], self.line_num, self.current_encoding])
    elif self.blocks:
        self.block_comments.append(
            [comments[2:-2], self.line_num, self.current_encoding])
0.008571
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    return isinstance(obj, six.integer_types + (type(None), float, Decimal,
                                                datetime.datetime,
                                                datetime.date,
                                                datetime.time))
0.00597
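"Protected" here means scalar types that force_text would leave alone when strings_only=True:

    is_protected_type(None)     # True
    is_protected_type(3.14)     # True
    is_protected_type('text')   # False -- strings still go through conversion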
def main(self):
    """
    Main entry point

    :return:
    """
    parser = self.init_parser()
    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(0)

    self.args = parser.parse_args()

    self.roca.args.flatten = self.args.flatten
    self.roca.args.indent = self.args.indent

    if self.args.debug:
        coloredlogs.install(level=logging.DEBUG)
        self.roca.args.debug = True

    self.work()
0.004098
def fit(self, t, y, dy=None):
    """Fit the multiterm Periodogram model to the data.

    Parameters
    ----------
    t : array_like, one-dimensional
        sequence of observation times
    y : array_like, one-dimensional
        sequence of observed values
    dy : float or array_like (optional)
        errors on observed values
    """
    # For linear models, dy=1 is equivalent to no errors
    if dy is None:
        dy = 1

    self.t, self.y, self.dy = np.broadcast_arrays(t, y, dy)

    self._fit(self.t, self.y, self.dy)
    self._best_period = None  # reset best period in case of refitting

    if self.fit_period:
        self._best_period = self._calc_best_period()
    return self
0.003797
def plot_histogram(data, figsize=(7, 5), color=None, number_to_keep=None,
                   sort='asc', target_string=None,
                   legend=None, bar_labels=True, title=None):
    """Plot a histogram of data.

    Args:
        data (list or dict): This is either a list of dictionaries or a
            single dict containing the values to represent
            (ex {'001': 130})
        figsize (tuple): Figure size in inches.
        color (list or str): String or list of strings for histogram bar
            colors.
        number_to_keep (int): The number of terms to plot and rest
            is made into a single bar called 'rest'.
        sort (string): Could be 'asc', 'desc', or 'hamming'.
        target_string (str): Target string if 'sort' is a distance measure.
        legend (list): A list of strings to use for labels of the data.
            The number of entries must match the length of data (if data
            is a list or 1 if it's a dict)
        bar_labels (bool): Label each bar in histogram with probability
            value.
        title (str): A string to use for the plot title

    Returns:
        matplotlib.Figure: A figure for the rendered histogram.

    Raises:
        ImportError: Matplotlib not available.
        VisualizationError: When legend is provided and the length doesn't
            match the input data.
    """
    if not HAS_MATPLOTLIB:
        raise ImportError('Must have Matplotlib installed.')
    if sort not in VALID_SORTS:
        raise VisualizationError("Value of sort option, %s, isn't a "
                                 "valid choice. Must be 'asc', "
                                 "'desc', or 'hamming'")
    elif sort in DIST_MEAS.keys() and target_string is None:
        err_msg = 'Must define target_string when using distance measure.'
        raise VisualizationError(err_msg)

    if isinstance(data, dict):
        data = [data]

    if legend and len(legend) != len(data):
        raise VisualizationError("Length of legend (%s) doesn't match "
                                 "number of input executions: %s" %
                                 (len(legend), len(data)))

    fig, ax = plt.subplots(figsize=figsize)
    labels = list(sorted(
        functools.reduce(lambda x, y: x.union(y.keys()), data, set())))
    if number_to_keep is not None:
        labels.append('rest')

    if sort in DIST_MEAS.keys():
        dist = []
        for item in labels:
            dist.append(DIST_MEAS[sort](item, target_string))

        labels = [list(x) for x in zip(*sorted(zip(dist, labels),
                                               key=lambda pair: pair[0]))][1]

    labels_dict = OrderedDict()

    # Set bar colors
    if color is None:
        color = ['#648fff', '#dc267f', '#785ef0', '#ffb000', '#fe6100']
    elif isinstance(color, str):
        color = [color]

    all_pvalues = []
    length = len(data)
    for item, execution in enumerate(data):
        if number_to_keep is not None:
            data_temp = dict(Counter(execution).most_common(number_to_keep))
            data_temp["rest"] = sum(execution.values()) - sum(data_temp.values())
            execution = data_temp
        values = []
        for key in labels:
            if key not in execution:
                if number_to_keep is None:
                    labels_dict[key] = 1
                    values.append(0)
                else:
                    values.append(-1)
            else:
                labels_dict[key] = 1
                values.append(execution[key])
        values = np.array(values, dtype=float)
        where_idx = np.where(values >= 0)[0]
        pvalues = values[where_idx] / sum(values[where_idx])
        for value in pvalues:
            all_pvalues.append(value)
        numelem = len(values[where_idx])
        ind = np.arange(numelem)  # the x locations for the groups
        width = 1 / (len(data) + 1)  # the width of the bars
        rects = []
        for idx, val in enumerate(pvalues):
            label = None
            if not idx and legend:
                label = legend[item]
            if val >= 0:
                rects.append(ax.bar(idx + item * width, val, width,
                                    label=label,
                                    color=color[item % len(color)],
                                    zorder=2))
        bar_center = (width / 2) * (length - 1)
        ax.set_xticks(ind + bar_center)
        ax.set_xticklabels(labels_dict.keys(), fontsize=14, rotation=70)
        # attach some text labels
        if bar_labels:
            for rect in rects:
                for rec in rect:
                    height = rec.get_height()
                    if height >= 1e-3:
                        ax.text(rec.get_x() + rec.get_width() / 2.,
                                1.05 * height, '%.3f' % float(height),
                                ha='center', va='bottom', zorder=3)
                    else:
                        ax.text(rec.get_x() + rec.get_width() / 2.,
                                1.05 * height, '0',
                                ha='center', va='bottom', zorder=3)

    # add some text for labels, title, and axes ticks
    ax.set_ylabel('Probabilities', fontsize=14)
    ax.set_ylim([0., min([1.2, max([1.2 * val for val in all_pvalues])])])
    if sort == 'desc':
        ax.invert_xaxis()

    ax.yaxis.set_major_locator(MaxNLocator(5))
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(14)
    ax.set_facecolor('#eeeeee')
    plt.grid(which='major', axis='y', zorder=0, linestyle='--')
    if title:
        plt.title(title)

    if legend:
        ax.legend(loc='upper left', bbox_to_anchor=(1.01, 1.0), ncol=1,
                  borderaxespad=0, frameon=True, fontsize=12)
    if fig:
        plt.close(fig)
    return fig
0.000868
def getUniqueFeaturesLocationsInObject(self, name):
    """
    Return two sets. The first set contains the unique location Ids in
    the object. The second set contains the unique feature Ids in the
    object.
    """
    uniqueFeatures = set()
    uniqueLocations = set()
    for pair in self.objects[name]:
        uniqueLocations = uniqueLocations.union({pair[0]})
        uniqueFeatures = uniqueFeatures.union({pair[1]})

    return uniqueLocations, uniqueFeatures
0.006466
def add_dependent_assembly(self, manifestVersion=None, noInheritable=False,
                           noInherit=False, type_=None, name=None,
                           language=None, processorArchitecture=None,
                           version=None, publicKeyToken=None,
                           description=None, requestedExecutionLevel=None,
                           uiAccess=None, dependentAssemblies=None,
                           files=None, comInterfaceExternalProxyStubs=None):
    """
    Shortcut for self.dependentAssemblies.append(Manifest(*args, **kwargs))
    """
    self.dependentAssemblies.append(
        Manifest(manifestVersion, noInheritable, noInherit, type_, name,
                 language, processorArchitecture, version, publicKeyToken,
                 description, requestedExecutionLevel, uiAccess,
                 dependentAssemblies, files,
                 comInterfaceExternalProxyStubs))
    if self.filename:
        # Enable search for private assembly by assigning bogus filename
        # (only the directory has to be correct)
        self.dependentAssemblies[-1].filename = ":".join(
            (self.filename, name))
0.015385
def main():
    """Parse the command-line arguments and run the bot."""
    parser = argparse.ArgumentParser(
        description='XMPP echo bot',
        parents=[XMPPSettings.get_arg_parser()])
    parser.add_argument('jid', metavar='JID', help='The bot JID')
    parser.add_argument('--debug',
                        action='store_const', dest='log_level',
                        const=logging.DEBUG, default=logging.INFO,
                        help='Print debug messages')
    parser.add_argument('--quiet', const=logging.ERROR,
                        action='store_const', dest='log_level',
                        help='Print only error messages')
    parser.add_argument('--trace', action='store_true',
                        help='Print XML data sent and received')
    args = parser.parse_args()

    settings = XMPPSettings({
        "software_name": "Echo Bot"
    })
    settings.load_arguments(args)

    if settings.get("password") is None:
        password = getpass("{0!r} password: ".format(args.jid))
        if sys.version_info.major < 3:
            password = password.decode("utf-8")
        settings["password"] = password

    if sys.version_info.major < 3:
        args.jid = args.jid.decode("utf-8")

    logging.basicConfig(level=args.log_level)
    if args.trace:
        print("enabling trace")
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        for logger in ("pyxmpp2.IN", "pyxmpp2.OUT"):
            logger = logging.getLogger(logger)
            logger.setLevel(logging.DEBUG)
            logger.addHandler(handler)
            logger.propagate = False

    bot = EchoBot(JID(args.jid), settings)
    try:
        bot.run()
    except KeyboardInterrupt:
        bot.disconnect()
0.019038
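Hedged usage sketch, assuming the module is saved as echobot.py and invokes main() on import as a script (the file name and JID below are made up):

# python echobot.py bot@example.org --debug
# python echobot.py bot@example.org --trace
# The script prompts for the account password before connecting.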
def clean(self, *args, **kwargs): """ from_user and to_user must differ """ if self.from_user and self.from_user_id == self.to_user_id: raise ValidationError(_('A user cannot send a notification to herself/himself'))
0.012295
def patch_apply(self, patches, text): """Merge a set of patches onto the text. Return a patched text, as well as a list of true/false values indicating which patches were applied. Args: patches: Array of Patch objects. text: Old text. Returns: Two element Array, containing the new text and an array of boolean values. """ if not patches: return (text, []) # Deep copy the patches so that no changes are made to originals. patches = self.patch_deepCopy(patches) nullPadding = self.patch_addPadding(patches) text = nullPadding + text + nullPadding self.patch_splitMax(patches) # delta keeps track of the offset between the expected and actual location # of the previous patch. If there are patches expected at positions 10 and # 20, but the first patch was found at 12, delta is 2 and the second patch # has an effective expected position of 22. delta = 0 results = [] for patch in patches: expected_loc = patch.start2 + delta text1 = self.diff_text1(patch.diffs) end_loc = -1 if len(text1) > self.Match_MaxBits: # patch_splitMax will only provide an oversized pattern in the case of # a monster delete. start_loc = self.match_main(text, text1[:self.Match_MaxBits], expected_loc) if start_loc != -1: end_loc = self.match_main(text, text1[-self.Match_MaxBits:], expected_loc + len(text1) - self.Match_MaxBits) if end_loc == -1 or start_loc >= end_loc: # Can't find valid trailing context. Drop this patch. start_loc = -1 else: start_loc = self.match_main(text, text1, expected_loc) if start_loc == -1: # No match found. :( results.append(False) # Subtract the delta for this failed patch from subsequent patches. delta -= patch.length2 - patch.length1 else: # Found a match. :) results.append(True) delta = start_loc - expected_loc if end_loc == -1: text2 = text[start_loc : start_loc + len(text1)] else: text2 = text[start_loc : end_loc + self.Match_MaxBits] if text1 == text2: # Perfect match, just shove the replacement text in. text = (text[:start_loc] + self.diff_text2(patch.diffs) + text[start_loc + len(text1):]) else: # Imperfect match. # Run a diff to get a framework of equivalent indices. diffs = self.diff_main(text1, text2, False) if (len(text1) > self.Match_MaxBits and self.diff_levenshtein(diffs) / float(len(text1)) > self.Patch_DeleteThreshold): # The end points match, but the content is unacceptably bad. results[-1] = False else: self.diff_cleanupSemanticLossless(diffs) index1 = 0 for (op, data) in patch.diffs: if op != self.DIFF_EQUAL: index2 = self.diff_xIndex(diffs, index1) if op == self.DIFF_INSERT: # Insertion text = text[:start_loc + index2] + data + text[start_loc + index2:] elif op == self.DIFF_DELETE: # Deletion text = text[:start_loc + index2] + text[start_loc + self.diff_xIndex(diffs, index1 + len(data)):] if op != self.DIFF_DELETE: index1 += len(data) # Strip the padding off. text = text[len(nullPadding):-len(nullPadding)] return (text, results)
0.008438
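A minimal round trip, assuming the surrounding class is the standard diff-match-patch API in which patch_make produces the Patch objects that patch_apply consumes:

dmp = diff_match_patch()
patches = dmp.patch_make('The quick brown fox', 'The swift brown fox')
new_text, applied = dmp.patch_apply(patches, 'The quick brown fox')
# new_text == 'The swift brown fox'; applied == [True]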
def run_in_subprocess(code, filename_suffix, arguments, working_directory): """Start a subprocess and return a callable that collects its result.""" temporary_file = tempfile.NamedTemporaryFile(mode='wb', suffix=filename_suffix) temporary_file.write(code.encode('utf-8')) temporary_file.flush() process = subprocess.Popen(arguments + [temporary_file.name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=working_directory) def run(): """Return None on success, else an (error_text, filename) tuple.""" raw_result = process.communicate() if process.returncode != 0: return (raw_result[1].decode(get_encoding()), temporary_file.name) return None return run
0.001274
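Usage sketch: the helper launches the subprocess immediately and returns a callable, so several checks can run concurrently and be collected later (the interpreter name is an assumption):

run = run_in_subprocess('print("hello")\n', '.py', ['python'], working_directory='.')
error = run()  # None on success, else (stderr_text, temp_file_name)
if error:
    print(error[0])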
def _rescanSizes(self, force=True): """ Zero and recalculate quota sizes so subvolume sizes will be correct. """ status = self.QUOTA_CTL(cmd=BTRFS_QUOTA_CTL_ENABLE).status logger.debug("CTL Status: %s", hex(status)) status = self.QUOTA_RESCAN_STATUS() logger.debug("RESCAN Status: %s", status) if not status.flags: if not force: return self.QUOTA_RESCAN() logger.warning("Waiting for btrfs quota usage scan...") self.QUOTA_RESCAN_WAIT()
0.005556
def load(filename): """Load variable from Pickle file Args: filename (str): path of the file to load Returns: variable read from filename """ with open(filename, 'rb') as fileObj: variable = pickle.load(fileObj) return variable
0.007092
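Companion sketch: a matching save helper plus a round trip (the path is illustrative; any pickle-serializable value works):

def save(variable, filename):
    with open(filename, 'wb') as fileObj:
        pickle.dump(variable, fileObj)

save({'a': 1}, '/tmp/example.pkl')
assert load('/tmp/example.pkl') == {'a': 1}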
def leastsq_NxN(x, y, fit_offset=False, perc=None): """Solution to least squares: gamma = cov(X,Y) / var(X) """ if perc is not None: if not fit_offset and isinstance(perc, (list, tuple)): perc = perc[1] weights = csr_matrix(get_weight(x, y, perc)).astype(bool) x, y = weights.multiply(x).tocsr(), weights.multiply(y).tocsr() else: weights = None with warnings.catch_warnings(): warnings.simplefilter("ignore") xx_ = prod_sum_obs(x, x) xy_ = prod_sum_obs(x, y) if fit_offset: n_obs = x.shape[0] if weights is None else sum_obs(weights) x_ = sum_obs(x) / n_obs y_ = sum_obs(y) / n_obs gamma = (xy_ / n_obs - x_ * y_) / (xx_ / n_obs - x_ ** 2) offset = y_ - gamma * x_ # fix negative offsets: idx = offset < 0 gamma[idx] = xy_[idx] / xx_[idx] offset = np.clip(offset, 0, None) else: gamma = xy_ / xx_ offset = np.zeros(x.shape[1]) offset[np.isnan(offset)], gamma[np.isnan(gamma)] = 0, 0 return offset, gamma
0.001756
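Sanity check of the closed form with plain numpy, independent of the sparse helpers above: with an offset, gamma = cov(x, y) / var(x) and offset = mean(y) - gamma * mean(x):

x = np.array([0., 1., 2., 3.])
y = 2.0 * x + 1.0
gamma = np.cov(x, y, bias=True)[0, 1] / np.var(x)
offset = y.mean() - gamma * x.mean()
# gamma == 2.0; offset == 1.0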
def _validate(self): """ Ensure that our expression string has variables of the form x_0, x_1, ... x_(N - 1), where N is the length of our inputs. """ variable_names, _unused = getExprNames(self._expr, {}) expr_indices = [] for name in variable_names: if name == 'inf': continue match = _VARIABLE_NAME_RE.match(name) if not match: raise ValueError("%r is not a valid variable name" % name) expr_indices.append(int(match.group(2))) expr_indices.sort() expected_indices = list(range(len(self.inputs))) if expr_indices != expected_indices: raise ValueError( "Expected %s for variable indices, but got %s" % ( expected_indices, expr_indices, ) ) super(NumericalExpression, self)._validate()
0.002148
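Illustrative cases for the naming convention enforced above, for an expression over two inputs (comments only; the exact error text may differ):

# "x_0 + x_1"  -> accepted: indices [0, 1] match the two inputs
# "x_0 + x_2"  -> rejected: expected [0, 1] but got [0, 2]
# "x_0 + foo"  -> rejected: 'foo' is not a valid variable name
# "inf - x_0"  -> rejected: 'inf' is skipped, leaving indices [0] != [0, 1]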
def _sanitize_resources(cls, resources): """Loops over incoming data looking for base64 encoded data and converts them to a readable format.""" try: for resource in cls._loop_raw(resources): cls._sanitize_resource(resource) except (KeyError, TypeError): _LOGGER.debug("no shade data available") return None
0.005115
def draw_text(data, obj):
    """Paints text on the graph.
    """
    content = []
    properties = []
    style = []
    if isinstance(obj, mpl.text.Annotation):
        _annotation(obj, data, content)

    # 1: coordinates
    # 2: properties (shapes, rotation, etc)
    # 3: text style
    # 4: the text
    #   -------1--------2---3--4--
    pos = obj.get_position()

    # from .util import transform_to_data_coordinates
    # pos = transform_to_data_coordinates(obj, *pos)

    text = obj.get_text()

    if text in ["", data["current axis title"]]:
        # Text nodes which are direct children of Axes are typically titles. They are
        # already captured by the `title` property of pgfplots axes, so skip them here.
        return data, content

    size = obj.get_size()
    bbox = obj.get_bbox_patch()
    converter = mpl.colors.ColorConverter()
    # without the factor 0.5, the fonts are too big most of the time.
    # TODO fix this
    scaling = 0.5 * size / data["font size"]
    ff = data["float format"]
    if scaling != 1.0:
        properties.append(("scale=" + ff).format(scaling))

    if bbox is not None:
        _bbox(bbox, data, properties, scaling)

    ha = obj.get_ha()
    va = obj.get_va()
    anchor = _transform_positioning(ha, va)
    if anchor is not None:
        properties.append(anchor)
    data, col, _ = color.mpl_color2xcolor(data, converter.to_rgb(obj.get_color()))
    properties.append("text={}".format(col))
    properties.append("rotate={:.1f}".format(obj.get_rotation()))

    if obj.get_style() == "italic":
        style.append("\\itshape")
    else:
        assert obj.get_style() == "normal"

    # From matplotlib/font_manager.py:
    #    weight_dict = {
    #        'ultralight' : 100,
    #        'light'      : 200,
    #        'normal'     : 400,
    #        'regular'    : 400,
    #        'book'       : 400,
    #        'medium'     : 500,
    #        'roman'      : 500,
    #        'semibold'   : 600,
    #        'demibold'   : 600,
    #        'demi'       : 600,
    #        'bold'       : 700,
    #        'heavy'      : 800,
    #        'extra bold' : 800,
    #        'black'      : 900}
    #
    # get_weights returns a numeric value in the range 0-1000 or one of
    # ‘light’, ‘normal’, ‘regular’, ‘book’, ‘medium’, ‘roman’, ‘semibold’,
    # ‘demibold’, ‘demi’, ‘bold’, ‘heavy’, ‘extra bold’, ‘black’
    weight = obj.get_weight()
    if weight in [
        "semibold",
        "demibold",
        "demi",
        "bold",
        "heavy",
        "extra bold",
        "black",
    ] or (isinstance(weight, int) and weight > 550):
        style.append("\\bfseries")
    # \lfseries isn't that common yet
    # elif weight == 'light' or (isinstance(weight, int) and weight < 300):
    #     style.append('\\lfseries')

    if obj.axes:
        # If the coordinates are relative to an axis, use `axis cs`.
        tikz_pos = ("(axis cs:" + ff + "," + ff + ")").format(*pos)
    else:
        # relative to the entire figure, it's getting a little harder. See
        # <http://tex.stackexchange.com/a/274902/13262> for a solution to the
        # problem:
        tikz_pos = (
            "({{$(current bounding box.south west)!" + ff + "!"
            "(current bounding box.south east)$}}"
            "|-"
            "{{$(current bounding box.south west)!" + ff + "!"
            "(current bounding box.north west)$}})"
        ).format(*pos)

    if "\n" in text:
        # http://tex.stackexchange.com/a/124114/13262
        properties.append("align={}".format(ha))
        # Manipulating the text here is actually against mpl2tikz's policy not
        # to do that. On the other hand, newlines should translate into
        # newlines.
        # We might want to remove this here in the future.
        text = text.replace("\n ", "\\\\")

    content.append(
        "\\node at {}[\n  {}\n]{{{}}};\n".format(
            tikz_pos, ",\n  ".join(properties), " ".join(style + [text])
        )
    )
    return data, content
0.001001
def __Script_Editor_Output_plainTextEdit_contextMenuEvent(self, event): """ Reimplements the :meth:`QPlainTextEdit.contextMenuEvent` method. :param event: QEvent. :type event: QEvent """ menu = self.Script_Editor_Output_plainTextEdit.createStandardContextMenu() menu.addSeparator() menu.addAction(self.__engine.actions_manager.register_action( "Actions|Umbra|Components|factory.script_editor|Edit Selected Path", slot=self.__edit_selected_path_action__triggered)) menu.exec_(event.globalPos())
0.006768
def recentEvents(self): ''' Get the set of recent and upcoming events to which this list applies. ''' return Event.objects.filter( Q(pk__in=self.individualEvents.values_list('pk',flat=True)) | Q(session__in=self.eventSessions.all()) | Q(publicevent__category__in=self.eventCategories.all()) | Q(series__category__in=self.seriesCategories.all()) ).filter( Q(startTime__lte=timezone.now() + timedelta(days=60)) & Q(endTime__gte=timezone.now() - timedelta(days=60)) )
0.005137
async def _submit(self, req_json: str) -> str: """ Submit (json) request to ledger; return (json) result. Raise AbsentPool for no pool, ClosedPool if pool is not yet open, or BadLedgerTxn on failure. :param req_json: json of request to sign and submit :return: json response """ LOGGER.debug('BaseAnchor._submit >>> req_json: %s', req_json) if not self.pool: LOGGER.debug('BaseAnchor._submit <!< absent pool') raise AbsentPool('Cannot submit request: absent pool') if not self.pool.handle: LOGGER.debug('BaseAnchor._submit <!< closed pool %s', self.pool.name) raise ClosedPool('Cannot submit request to closed pool {}'.format(self.pool.name)) rv_json = await ledger.submit_request(self.pool.handle, req_json) await asyncio.sleep(0) resp = json.loads(rv_json) if resp.get('op', '') in ('REQNACK', 'REJECT'): LOGGER.debug('BaseAnchor._submit <!< ledger rejected request: %s', resp['reason']) raise BadLedgerTxn('Ledger rejected transaction request: {}'.format(resp['reason'])) LOGGER.debug('BaseAnchor._submit <<< %s', rv_json) return rv_json
0.005645
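Hedged usage sketch inside a coroutine; build_get_nym_request is a real indy-sdk call, but the anchor instance and DID variables are assumptions:

req_json = await ledger.build_get_nym_request(anchor.did, target_did)
resp_json = await anchor._submit(req_json)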
def pretty_str(self, indent=0): """Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation. """ if self.parenthesis: return '{}({})'.format(' ' * indent, pretty_str(self.value)) return pretty_str(self.value, indent=indent)
0.005587
def unzip(self, payload): """ Unzips a file :param payload: zip_with_rel_path: string remove_original_zip: boolean :return: (object) unzipped_path: string """ zip_with_rel_path = payload.pop('zip_with_rel_path') url = "{url_base}/resource/{pid}/functions/unzip/{path}/".format( url_base=self.hs.url_base, path=zip_with_rel_path, pid=self.pid) r = self.hs._request('POST', url, None, payload) return r
0.003552
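Usage sketch against a hypothetical resource object (the archive path is made up; remove_original_zip is forwarded with the request):

payload = {'zip_with_rel_path': 'data/archive.zip', 'remove_original_zip': True}
response = resource.unzip(payload)  # response carries the unzipped_path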
def list_api_keys(self, **kwargs): """List the API keys registered in the organisation. List api keys Example: .. code-block:: python account_management_api = AccountManagementAPI() # List api keys api_keys_paginated_response = account_management_api.list_api_keys() # get single api key api_keys_paginated_response.data[0] :param int limit: Number of API keys to get :param str after: Entity ID after which to start fetching :param str order: Order of the records to return (asc|desc) :param dict filters: Dictionary of filters to apply: str owner (eq) :returns: a list of :class:`ApiKey` objects :rtype: PaginatedResponse :raises: ApiException """ kwargs = self._verify_sort_options(kwargs) kwargs = self._verify_filters(kwargs, ApiKey) api = self._get_api(iam.DeveloperApi) # Return the data array return PaginatedResponse(api.get_all_api_keys, lwrap_type=ApiKey, **kwargs)
0.003735
def raw_chroma_accuracy(ref_voicing, ref_cent, est_voicing, est_cent, cent_tolerance=50): """Compute the raw chroma accuracy given two pitch (frequency) sequences in cents and matching voicing indicator sequences. The first pitch and voicing arrays are treated as the reference (truth), and the second two as the estimate (prediction). All 4 sequences must be of the same length. Examples -------- >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt') >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt') >>> (ref_v, ref_c, ... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time, ... ref_freq, ... est_time, ... est_freq) >>> raw_chroma = mir_eval.melody.raw_chroma_accuracy(ref_v, ref_c, ... est_v, est_c) Parameters ---------- ref_voicing : np.ndarray Reference boolean voicing array ref_cent : np.ndarray Reference pitch sequence in cents est_voicing : np.ndarray Estimated boolean voicing array est_cent : np.ndarray Estimate pitch sequence in cents cent_tolerance : float Maximum absolute deviation for a cent value to be considered correct (Default value = 50) Returns ------- raw_chroma : float Raw chroma accuracy, the fraction of voiced frames in ref_cent for which est_cent provides a correct frequency values (within cent_tolerance cents), ignoring octave errors References ---------- .. [#] J. Salamon, E. Gomez, D. P. W. Ellis and G. Richard, "Melody Extraction from Polyphonic Music Signals: Approaches, Applications and Challenges", IEEE Signal Processing Magazine, 31(2):118-134, Mar. 2014. .. [#] G. E. Poliner, D. P. W. Ellis, A. F. Ehmann, E. Gomez, S. Streich, and B. Ong. "Melody transcription from music audio: Approaches and evaluation", IEEE Transactions on Audio, Speech, and Language Processing, 15(4):1247-1256, 2007. """ validate_voicing(ref_voicing, est_voicing) validate(ref_voicing, ref_cent, est_voicing, est_cent) ref_voicing = ref_voicing.astype(bool) est_voicing = est_voicing.astype(bool) # When input arrays are empty, return 0 by special case if ref_voicing.size == 0 or est_voicing.size == 0 \ or ref_cent.size == 0 or est_cent.size == 0: return 0. # If there are no voiced frames in reference, metric is 0 if ref_voicing.sum() == 0: return 0. # Raw chroma = same as raw pitch except that octave errors are ignored. cent_diff = np.abs(ref_cent - est_cent) octave = 1200*np.floor(cent_diff/1200.0 + 0.5) matching_voicing = ref_voicing * (est_cent > 0) cent_diff = np.abs(cent_diff - octave)[matching_voicing] frame_correct = (cent_diff < cent_tolerance) n_voiced = float(ref_voicing.sum()) raw_chroma = (frame_correct).sum()/n_voiced return raw_chroma
0.000314
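Worked example of the octave folding above: an estimate roughly one octave above the reference folds back to a 40-cent error, so it is counted as correct at the default 50-cent tolerance:

ref_v = np.array([True])
est_v = np.array([True])
ref_c = np.array([1200.])
est_c = np.array([2440.])
score = raw_chroma_accuracy(ref_v, ref_c, est_v, est_c)  # == 1.0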
def add_app_template_global(self, func: Callable, name: Optional[str]=None) -> None: """Add an application wide template global. This is designed to be used on the blueprint directly, and has the same arguments as :meth:`~quart.Quart.add_template_global`. An example usage, .. code-block:: python def my_global(): ... blueprint = Blueprint(__name__) blueprint.add_app_template_global(my_global) """ self.record_once(lambda state: state.register_template_global(func, name))
0.010363
def transactional(wrapped):
    """
    A decorator to denote that the content of the decorated function or
    method is to be ran in a transaction.

    The following code is equivalent to the example for
    :py:func:`dbkit.transaction`::

        import sqlite3
        import sys

        from dbkit import connect, transactional, query_value, execute

        # ...do some stuff...

        with connect(sqlite3, '/path/to/my.db') as ctx:
            try:
                change_ownership(page_id, new_owner_id)
            except ctx.IntegrityError:
                print >> sys.stderr, "Naughty!"

        @transactional
        def change_ownership(page_id, new_owner_id):
            old_owner_id = query_value(
                "SELECT owner_id FROM pages WHERE page_id = ?",
                (page_id,))
            execute(
                "UPDATE users SET owned = owned - 1 WHERE id = ?",
                (old_owner_id,))
            execute(
                "UPDATE users SET owned = owned + 1 WHERE id = ?",
                (new_owner_id,))
            execute(
                "UPDATE pages SET owner_id = ? WHERE page_id = ?",
                (new_owner_id, page_id))
    """
    # pylint: disable-msg=C0111
    def wrapper(*args, **kwargs):
        with Context.current().transaction():
            return wrapped(*args, **kwargs)
    return functools.update_wrapper(wrapper, wrapped)
0.000714