text: string (lengths 78 to 104k)
score: float64 (0 to 0.18)
def _fromData(cls, header, tflags, data): """Construct this ID3 frame from raw string data. Raises: ID3JunkFrameError in case parsing failed NotImplementedError in case parsing isn't implemented ID3EncryptionUnsupportedError in case the frame is encrypted. """ if header.version >= header._V24: if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN): # The data length int is syncsafe in 2.4 (but not 2.3). # However, we don't actually need the data length int, # except to work around a QL 0.12 bug, and in that case # all we need are the raw bytes. datalen_bytes = data[:4] data = data[4:] if tflags & Frame.FLAG24_UNSYNCH or header.f_unsynch: try: data = unsynch.decode(data) except ValueError: # Some things write synch-unsafe data with either the frame # or global unsynch flag set. Try to load them as is. # https://github.com/quodlibet/mutagen/issues/210 # https://github.com/quodlibet/mutagen/issues/223 pass if tflags & Frame.FLAG24_ENCRYPT: raise ID3EncryptionUnsupportedError if tflags & Frame.FLAG24_COMPRESS: try: data = zlib.decompress(data) except zlib.error: # the initial mutagen that went out with QL 0.12 did not # write the 4 bytes of uncompressed size. Compensate. data = datalen_bytes + data try: data = zlib.decompress(data) except zlib.error as err: raise ID3JunkFrameError( 'zlib: %s: %r' % (err, data)) elif header.version >= header._V23: if tflags & Frame.FLAG23_COMPRESS: usize, = unpack('>L', data[:4]) data = data[4:] if tflags & Frame.FLAG23_ENCRYPT: raise ID3EncryptionUnsupportedError if tflags & Frame.FLAG23_COMPRESS: try: data = zlib.decompress(data) except zlib.error as err: raise ID3JunkFrameError('zlib: %s: %r' % (err, data)) frame = cls() frame._readData(header, data) return frame
0.000795
def validate_structure(reference_intervals, reference_labels, estimated_intervals, estimated_labels): """Checks that the input annotations to a structure estimation metric (i.e. one that takes in both segment boundaries and their labels) look like valid segment times and labels, and throws helpful errors if not. Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2) reference segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. reference_labels : list, shape=(n,) reference segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_intervals : np.ndarray, shape=(m, 2) estimated segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_labels : list, shape=(m,) estimated segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. """ for (intervals, labels) in [(reference_intervals, reference_labels), (estimated_intervals, estimated_labels)]: util.validate_intervals(intervals) if intervals.shape[0] != len(labels): raise ValueError('Number of intervals does not match number ' 'of labels') # Check only when intervals are non-empty if intervals.size > 0: # Make sure intervals start at 0 if not np.allclose(intervals.min(), 0.0): raise ValueError('Segment intervals do not start at 0') if reference_intervals.size == 0: warnings.warn("Reference intervals are empty.") if estimated_intervals.size == 0: warnings.warn("Estimated intervals are empty.") # Check only when intervals are non-empty if reference_intervals.size > 0 and estimated_intervals.size > 0: if not np.allclose(reference_intervals.max(), estimated_intervals.max()): raise ValueError('End times do not match')
0.000476
def length_prepend(byte_string):
    ''' bytes -> bytes '''
    length = tx.VarInt(len(byte_string))
    return length.to_bytes() + byte_string
0.006623
def delete(self, name=None):
    "Delete the shelve data file."
    logger.info('clearing shelve data')
    self.close()
    for path in Path(self.create_path.parent, self.create_path.name), \
            Path(self.create_path.parent, self.create_path.name + '.db'):
        logger.debug(f'clearing {path} if exists: {path.exists()}')
        if path.exists():
            path.unlink()
            break
0.006881
def release_api_class(self):
    """Github Release API class."""
    cls = current_app.config['GITHUB_RELEASE_CLASS']
    if isinstance(cls, string_types):
        cls = import_string(cls)
    assert issubclass(cls, GitHubRelease)
    return cls
0.007435
def checkattr(metacls, attr, value):
    """
    Only allow class attributes that are instances of
    rootpy.types.Column, ROOT.TObject, or ROOT.ObjectProxy
    """
    if not isinstance(value, (
            types.MethodType,
            types.FunctionType,
            classmethod,
            staticmethod,
            property)):
        if attr in dir(type('dummy', (object,), {})) + \
                ['__metaclass__', '__qualname__']:
            return
        if attr.startswith('_'):
            raise SyntaxError(
                "TreeModel attribute `{0}` "
                "must not start with `_`".format(attr))
        if not inspect.isclass(value):
            if not isinstance(value, Column):
                raise TypeError(
                    "TreeModel attribute `{0}` "
                    "must be an instance of "
                    "`rootpy.tree.treetypes.Column`".format(attr))
            return
        if not issubclass(value, (ROOT.TObject, ROOT.ObjectProxy)):
            raise TypeError(
                "TreeModel attribute `{0}` must inherit "
                "from `ROOT.TObject` or `ROOT.ObjectProxy`".format(
                    attr))
0.00155
def raw_p_sha1(secret, seed, sizes=()):
    """
    Derive one or more keys from secret and seed.
    (See specs part 6, 6.7.5 and RFC 2246 - TLS v1.0)
    Lengths of keys will match sizes argument
    Source: https://github.com/FreeOpcUa/python-opcua

    key_sizes = (signature_key_size, symmetric_key_size, 16)
    (sigkey, key, init_vec) = p_sha1(nonce2, nonce1, key_sizes)
    """
    full_size = 0
    for size in sizes:
        full_size += size

    result = b''
    accum = seed
    while len(result) < full_size:
        accum = hmac_sha1(secret, accum)
        result += hmac_sha1(secret, accum + seed)

    parts = []
    for size in sizes:
        parts.append(result[:size])
        result = result[size:]
    return tuple(parts)
0.001346
async def setup_streamer(self):
    """Sets up basic defaults for the streamer"""
    self.streamer.volume = self.volume / 100
    self.streamer.start()

    self.pause_time = None
    self.vclient_starttime = self.vclient.loop.time()

    # Cache next song
    self.logger.debug("Caching next song")
    dl_thread = threading.Thread(target=self.download_next_song_cache)
    dl_thread.start()
0.004662
def command(self):
    '''Command used to launch this application module'''
    cmd = self.config.get('command', None)
    if cmd is None:
        return

    cmd = cmd[platform]
    return cmd['path'], cmd['args']
0.008368
def orient_undirected_graph(self, data, umg, alg='HC'): """Orient the undirected graph using GNN and apply CGNN to improve the graph. Args: data (pandas.DataFrame): Observational data on which causal discovery has to be performed. umg (nx.Graph): Graph that provides the skeleton, on which the GNN then the CGNN algorithm will be applied. alg (str): Exploration heuristic to use, among ["HC", "HCr", "tabu", "EHC"] Returns: networkx.DiGraph: Solution given by CGNN. .. note:: GNN (``cdt.causality.pairwise.GNN``) is first used to orient the undirected graph and output a DAG before applying CGNN. """ warnings.warn("The pairwise GNN model is computed on each edge of the UMG " "to initialize the model and start CGNN with a DAG") gnn = GNN(nh=self.nh, lr=self.lr) og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs, nb_max_runs=self.nb_runs, nb_jobs=self.nb_jobs, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose, gpu=self.gpu) # Pairwise method # print(nx.adj_matrix(og).todense().shape) # print(list(og.edges())) dag = dagify_min_edge(og) # print(nx.adj_matrix(dag).todense().shape) return self.orient_directed_graph(data, dag, alg=alg)
0.005376
def get_package_update_list(package_name, version): """ Return update information of a package from a given version :param package_name: string :param version: string :return: dict """ package_version = semantic_version.Version.coerce(version) # Get package and version data from pypi package_data = get_pypi_package_data(package_name) version_data = get_pypi_package_data(package_name, version) # Current release specific information current_release = '' current_release_license = '' # Latest release specific information latest_release = '' latest_release_license = '' # Information about packages major_updates = [] minor_updates = [] patch_updates = [] pre_releases = [] non_semantic_versions = [] if package_data: latest_release = package_data['info']['version'] latest_release_license = package_data['info']['license'] if package_data['info']['license'] else '' for release, info in package_data['releases'].items(): parsed_release = parse(release) upload_time = None if info: upload_time = datetime.strptime(info[0]['upload_time'], "%Y-%m-%dT%H:%M:%S") try: # Get semantic version of package release_version = semantic_version.Version.coerce(release) if not parsed_release.is_prerelease: # Place package in the appropriate semantic visioning list if release_version in semantic_version.Spec(">=%s" % package_version.next_major()): major_updates.append({ 'version': release, 'upload_time': upload_time, }) elif release_version in semantic_version.Spec(">=%s,<%s" % (package_version.next_minor(), package_version.next_major())): minor_updates.append({ 'version': release, 'upload_time': upload_time, }) elif release_version in semantic_version.Spec(">=%s,<%s" % (package_version.next_patch(), package_version.next_minor())): patch_updates.append({ 'version': release, 'upload_time': upload_time, }) else: pre_releases.append({ 'version': release, 'upload_time': upload_time }) except ValueError: # Keep track of versions that could not be recognized as semantic non_semantic_versions.append({'version': release, 'upload_time': upload_time}) if version_data: current_release = version_data['info']['version'] current_release_license = version_data['info']['license'] if version_data['info']['license'] else '' # Get number of newer releases available for the given package newer_releases = len(major_updates + minor_updates + patch_updates) return { 'current_release': current_release, 'current_release_license': current_release_license, 'latest_release': latest_release, 'latest_release_license': latest_release_license, 'newer_releases': newer_releases, 'pre_releases': len(pre_releases), 'major_updates': sorted(major_updates, key=lambda x: semantic_version.Version.coerce(x['version']), reverse=True), 'minor_updates': sorted(minor_updates, key=lambda x: semantic_version.Version.coerce(x['version']), reverse=True), 'patch_updates': sorted(patch_updates, key=lambda x: semantic_version.Version.coerce(x['version']), reverse=True), 'pre_release_updates': sorted(pre_releases, key=lambda x: semantic_version.Version.coerce(x['version']), reverse=True), 'non_semantic_versions': non_semantic_versions, }
0.003239
def editComment(self, repo_user, repo_name, comment_id, body):
    """
    PATCH /repos/:owner/:repo/pulls/comments/:id

    :param comment_id: The ID of the comment to edit
    :param body: The new body of the comment.
    """
    return self.api.makeRequest(
        ["repos", repo_user, repo_name,
         "pulls", "comments", str(comment_id)],
        method="POST",
        data=dict(body=body))
0.004535
def merge_entity(self, table_name, entity, if_match='*', timeout=None): ''' Updates an existing entity by merging the entity's properties. Throws if the entity does not exist. This operation does not replace the existing entity as the update_entity operation does. A property cannot be removed with merge_entity. Any properties with null values are ignored. All other properties will be updated or added. :param str table_name: The name of the table containing the entity to merge. :param entity: The entity to merge. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: dict or :class:`~azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The merge operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional merge, set If-Match to the wildcard character (*). :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str ''' _validate_not_none('table_name', table_name) request = _merge_entity(entity, if_match, self.require_encryption, self.key_encryption_key) request.host_locations = self._get_host_locations() request.query['timeout'] = _int_to_str(timeout) request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) return self._perform_request(request, _extract_etag)
0.00859
def enrich(self, column1, column2):
    """
    This class splits those commits where column1 and column2 values
    are different

    :param column1: column to compare to column2
    :param column2: column to compare to column1
    :type column1: string
    :type column2: string

    :returns: self.commits with duplicated rows where the values at
        columns are different. The original row remains while the second
        row contains in column1 and 2 the value of column2.
    :rtype: pandas.DataFrame
    """
    if column1 not in self.commits.columns or \
            column2 not in self.commits.columns:
        return self.commits

    # Select rows where values in column1 are different from
    # values in column2
    pair_df = self.commits[self.commits[column1] != self.commits[column2]]
    new_values = list(pair_df[column2])
    # Update values from column2
    pair_df[column1] = new_values

    # This adds at the end of the original dataframe those rows duplicating
    # information and updating the values in column1
    return self.commits.append(pair_df)
0.001714
def user_cache_dir(appname): # type: (str) -> str r""" Return full path to the user-specific cache dir for this application. "appname" is the name of application. Typical user cache directories are: macOS: ~/Library/Caches/<AppName> Unix: ~/.cache/<AppName> (XDG default) Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir`). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. """ if WINDOWS: # Get the base path path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) # When using Python 2, return paths as bytes on Windows like we do on # other operating systems. See helper function docs for more details. if PY2 and isinstance(path, text_type): path = _win_path_to_bytes(path) # Add our app name and Cache directory to it path = os.path.join(path, appname, "Cache") elif sys.platform == "darwin": # Get the base path path = expanduser("~/Library/Caches") # Add our app name to it path = os.path.join(path, appname) else: # Get the base path path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache")) # Add our app name to it path = os.path.join(path, appname) return path
0.000574
def write_preferences_file(self):
    """
    Write json preferences file to (platform specific) user data directory,
    or PmagPy directory if appdirs module is missing.
    """
    user_data_dir = find_pmag_dir.find_user_data_dir("thellier_gui")
    if not os.path.exists(user_data_dir):
        find_pmag_dir.make_user_data_dir(user_data_dir)
    pref_file = os.path.join(user_data_dir, "thellier_gui_preferences.json")
    with open(pref_file, "w+") as pfile:
        print('-I- writing preferences to {}'.format(pref_file))
        json.dump(self.preferences, pfile)
0.00487
def getTrackedDeviceIndexForControllerRole(self, unDeviceType):
    """Returns the device index associated with a specific role, for example
    the left hand or the right hand. This function is deprecated in favor of
    the new IVRInput system."""
    fn = self.function_table.getTrackedDeviceIndexForControllerRole
    result = fn(unDeviceType)
    return result
0.008021
def _call_connection_lost_and_clean_up(self, exc):
    """
    Clean up all resources and call the protocols connection lost method.
    """
    self._state = _State.CLOSED

    try:
        self._protocol.connection_lost(exc)
    finally:
        self._rawsock.close()
        if self._tls_conn is not None:
            self._tls_conn.set_app_data(None)
            self._tls_conn = None
        self._rawsock = None
        self._protocol = None
        self._loop = None
0.003781
def topic_inject(self, topic_name, _msg_content=None, **kwargs):
    """
    Injecting message into topic. if _msg_content, we inject it directly.
    if not, we use all extra kwargs
    :param topic_name: name of the topic
    :param _msg_content: optional message content
    :param kwargs: each extra kwarg will be put int he message is structure matches
    :return:
    """
    # changing unicode to string ( testing stability of multiprocess debugging )
    if isinstance(topic_name, unicode):
        topic_name = unicodedata.normalize('NFKD', topic_name).encode('ascii', 'ignore')

    if _msg_content is not None:
        # logging.warn("injecting {msg} into {topic}".format(msg=_msg_content, topic=topic_name))
        res = self.topic_svc.call(args=(topic_name, _msg_content,))
    else:  # default kwargs is {}
        # logging.warn("injecting {msg} into {topic}".format(msg=kwargs, topic=topic_name))
        res = self.topic_svc.call(args=(topic_name, kwargs,))
    return res is None
0.008467
def get_integer_index( miller_index: bool, round_dp: int = 4, verbose: bool = True ) -> Tuple[int, int, int]: """ Attempt to convert a vector of floats to whole numbers. Args: miller_index (list of float): A list miller indexes. round_dp (int, optional): The number of decimal places to round the miller index to. verbose (bool, optional): Whether to print warnings. Returns: (tuple): The Miller index. """ miller_index = np.asarray(miller_index) # deal with the case we have small irregular floats # that are all equal or factors of each other miller_index /= min([m for m in miller_index if m != 0]) miller_index /= np.max(np.abs(miller_index)) # deal with the case we have nice fractions md = [Fraction(n).limit_denominator(12).denominator for n in miller_index] miller_index *= reduce(lambda x, y: x * y, md) int_miller_index = np.int_(np.round(miller_index, 1)) miller_index /= np.abs(reduce(gcd, int_miller_index)) # round to a reasonable precision miller_index = np.array([round(h, round_dp) for h in miller_index]) # need to recalculate this after rounding as values may have changed int_miller_index = np.int_(np.round(miller_index, 1)) if np.any(np.abs(miller_index - int_miller_index) > 1e-6) and verbose: warnings.warn("Non-integer encountered in Miller index") else: miller_index = int_miller_index # minimise the number of negative indexes miller_index += 0 # converts -0 to 0 def n_minus(index): return len([h for h in index if h < 0]) if n_minus(miller_index) > n_minus(miller_index * -1): miller_index *= -1 # if only one index is negative, make sure it is the smallest # e.g. (-2 1 0) -> (2 -1 0) if ( sum(miller_index != 0) == 2 and n_minus(miller_index) == 1 and abs(min(miller_index)) > max(miller_index) ): miller_index *= -1 return tuple(miller_index)
0.000496
def determine_target_roots(self, goal_name):
    """Helper for tasks that scan for default target roots.

    :param string goal_name: The goal name to use for any warning emissions.
    """
    if not self.context.target_roots:
        print('WARNING: No targets were matched in goal `{}`.'.format(goal_name), file=sys.stderr)

    # For the v2 path, e.g. `./pants list` is a functional no-op. This matches the v2 mode behavior
    # of e.g. `./pants --changed-parent=HEAD list` (w/ no changes) returning an empty result.
    return self.context.target_roots
0.008977
def upload_as_zip(name, filename):
    """
    Upload an IPList as a zip file. Useful when IPList is very large.
    This is the default upload format for IPLists.

    :param str name: name of IPList
    :param str filename: name of zip file to upload, full path
    :return: None
    """
    location = list(IPList.objects.filter(name))
    if location:
        iplist = location[0]
        return iplist.upload(filename=filename)
0.002315
def gen_input_add(sig_dic):
    '''
    Adding for HTML Input control.
    '''
    if sig_dic['en'] == 'tag_file_download':
        html_str = HTML_TPL_DICT['input_add_download'].format(
            sig_en=sig_dic['en'],
            sig_zh=sig_dic['zh'],
            sig_dic=sig_dic['dic'][1],
            sig_type=sig_dic['type']
        )
    else:
        html_str = HTML_TPL_DICT['input_add'].format(
            sig_en=sig_dic['en'],
            sig_zh=sig_dic['zh'],
            sig_dic=sig_dic['dic'][1],
            sig_type=sig_dic['type']
        )
    return html_str
0.00173
def get(self, count, offset=0, _options=None):
    '''
    Fetch all Plaid institutions, using /institutions/all.

    :param int count: Number of institutions to fetch.
    :param int offset: Number of institutions to skip.
    '''
    options = _options or {}

    return self.client.post('/institutions/get', {
        'count': count,
        'offset': offset,
        'options': options,
    })
0.004329
def string_sanitize(string, tab_width=8): r""" strips, and replaces non-printable characters :param tab_width: number of spaces to replace tabs with. Read from `globals.tabwidth` setting if `None` :type tab_width: int or `None` >>> string_sanitize(' foo\rbar ', 8) ' foobar ' >>> string_sanitize('foo\tbar', 8) 'foo bar' >>> string_sanitize('foo\t\tbar', 8) 'foo bar' """ string = string.replace('\r', '') lines = list() for line in string.split('\n'): tab_count = line.count('\t') if tab_count > 0: line_length = 0 new_line = list() for i, chunk in enumerate(line.split('\t')): line_length += len(chunk) new_line.append(chunk) if i < tab_count: next_tab_stop_in = tab_width - (line_length % tab_width) new_line.append(' ' * next_tab_stop_in) line_length += next_tab_stop_in lines.append(''.join(new_line)) else: lines.append(line) return '\n'.join(lines)
0.000868
def run(self):
    """Connect to SABnzbd and get the data."""
    try:
        answer = urlopen(self.url + "&mode=queue").read().decode()
    except (HTTPError, URLError) as error:
        self.output = {
            "full_text": str(error.reason),
            "color": "#FF0000"
        }
        return

    answer = json.loads(answer)

    # if answer["status"] exists and is False, an error occurred
    if not answer.get("status", True):
        self.output = {
            "full_text": answer["error"],
            "color": "#FF0000"
        }
        return

    queue = answer["queue"]
    self.status = queue["status"]

    if self.is_paused():
        color = self.color_paused
    elif self.is_downloading():
        color = self.color_downloading
    else:
        color = self.color

    if self.is_downloading():
        full_text = self.format.format(**queue)
    else:
        full_text = self.format_paused.format(**queue)

    self.output = {
        "full_text": full_text,
        "color": color
    }
0.001736
def xfrange(start, stop, step=1, maxSize=-1):
    """
    Returns a generator that yields the frames from start to stop, inclusive.
    In other words it adds or subtracts a frame, as necessary, to return the
    stop value as well, if the stepped range would touch that value.

    Args:
        start (int):
        stop (int):
        step (int): Note that the sign will be ignored
        maxSize (int):

    Returns:
        generator:

    Raises:
        :class:`fileseq.exceptions.MaxSizeException`: if size is exceeded
    """
    if start <= stop:
        stop, step = stop + 1, abs(step)
    else:
        stop, step = stop - 1, -abs(step)

    if maxSize >= 0:
        size = lenRange(start, stop, step)
        if size > maxSize:
            raise exceptions.MaxSizeException(
                "Size %d > %s (MAX_FRAME_SIZE)" % (size, maxSize))

    # because an xrange is an odd object all its own, we wrap it in a
    # generator expression to get a proper Generator
    return (f for f in xrange(start, stop, step))
0.000971
def git_add_commit_push_all_repos(cat): """Add all files in each data repository tree, commit, push. Creates a commit message based on the current catalog version info. If either the `git add` or `git push` commands fail, an error will be raised. Currently, if `commit` fails an error *WILL NOT* be raised because the `commit` command will return a nonzero exit status if there are no files to add... which we dont want to raise an error. FIX: improve the error checking on this. """ log = cat.log log.debug("gitter.git_add_commit_push_all_repos()") # Do not commit/push private repos all_repos = cat.PATHS.get_all_repo_folders(private=False) for repo in all_repos: log.info("Repo in: '{}'".format(repo)) # Get the initial git SHA sha_beg = get_sha(repo) log.debug("Current SHA: '{}'".format(sha_beg)) # Get files that should be added, compress and check sizes add_files = cat._prep_git_add_file_list(repo, cat.COMPRESS_ABOVE_FILESIZE) log.info("Found {} Files to add.".format(len(add_files))) if len(add_files) == 0: continue try: # Add all files in the repository directory tree git_comm = ["git", "add"] if cat.args.travis: git_comm.append("-f") git_comm.extend(add_files) _call_command_in_repo( git_comm, repo, cat.log, fail=True, log_flag=False) # Commit these files commit_msg = "'push' - adding all files." commit_msg = "{} : {}".format(cat._version_long, commit_msg) log.info(commit_msg) git_comm = ["git", "commit", "-am", commit_msg] _call_command_in_repo(git_comm, repo, cat.log) # Add all files in the repository directory tree git_comm = ["git", "push"] if not cat.args.travis: _call_command_in_repo(git_comm, repo, cat.log, fail=True) except Exception as err: try: git_comm = ["git", "reset", "HEAD"] _call_command_in_repo(git_comm, repo, cat.log, fail=True) except: pass raise err return
0.000867
def check_compliance(self, function):
    """
    Checks if a given `function` complies with this specification. If an
    inconsistency is detected a :py:exc:`MethodValidationError` exception
    is raised.

    .. note::
        This method will not work as expected when `function` is an unbound
        method (``SomeClass.some_method``), as in Python 3 there is no way
        to recognize that this is in fact a method. Therefore, the implied
        `self` argument will not be ignored.

    :raises: :py:exc:`MethodValidationError`
    """
    argument_specification = _get_argument_specification(function)
    if inspect.ismethod(function):
        # Remove implied `self` argument from specification if function is
        # a method.
        argument_specification = argument_specification._replace(
            args=argument_specification.args[1:]
        )

    if argument_specification != self.argument_specification:
        raise MethodValidationError(
            function, self.argument_specification, argument_specification
        )
0.001689
def wait_for_page_to_load(self):
    """Wait for the page to load."""
    self.wait.until(lambda _: self.loaded)
    self.pm.hook.pypom_after_wait_for_page_to_load(page=self)
    return self
0.009709
def process_file(filename: str, filetypes: List[str], move_to: str, delete_if_not_specified_file_type: bool, show_zip_output: bool) -> None: """ Deals with an OpenXML, including if it is potentially corrupted. Args: filename: filename to process filetypes: list of filetypes that we care about, e.g. ``['docx', 'pptx', 'xlsx']``. move_to: move matching files to this directory delete_if_not_specified_file_type: if ``True``, and the file is **not** a type specified in ``filetypes``, then delete the file. show_zip_output: show the output from the external ``zip`` tool? """ # log.critical("process_file: start") try: reader = CorruptedOpenXmlReader(filename, show_zip_output=show_zip_output) if reader.file_type in filetypes: log.info("Found {}: {}", reader.description, filename) if move_to: dest_file = os.path.join(move_to, os.path.basename(filename)) _, ext = os.path.splitext(dest_file) if ext != reader.suggested_extension(): dest_file += reader.suggested_extension() reader.move_to(destination_filename=dest_file) else: log.info("Unrecognized or unwanted contents: " + filename) if delete_if_not_specified_file_type: log.info("Deleting: " + filename) os.remove(filename) except Exception as e: # Must explicitly catch and report errors, since otherwise they vanish # into the ether. log.critical("Uncaught error in subprocess: {!r}\n{}", e, traceback.format_exc()) raise
0.000549
def write_users(dburl):
    """Write users to the DB."""
    data = {
        'username': 'admin',
        'realname': 'Website Administrator',
        'email': '[email protected]',
        'password': r'$bcrypt-sha256$2a,12$NNtd2TC9mZO6.EvLwEwlLO$axojD34/iE8x'
                    r'QitQnCCOGPhofgmjNdq',
    }
    for p in PERMISSIONS:
        data[p] = '1'

    db = redis.StrictRedis.from_url(dburl)
    db.hmset('user:1', data)
    db.hset('users', 'admin', '1')
    if not db.exists('last_uid'):
        db.incr('last_uid')
    print("Username: admin")
    print("Password: admin")
    return 0
0.001653
def get_id(self):
    '''
    :returns: Object ID of associated app
    :rtype: string

    Returns the object ID of the app that the handler is currently
    associated with.
    '''
    if self._dxid is not None:
        return self._dxid
    else:
        return 'app-' + self._name + '/' + self._alias
0.005797
def read_short(source, offset):
    """Reads a number from a byte array.

    :param bytes source: Source byte string
    :param int offset: Point in byte string to start reading
    :returns: Read number and offset at point after read data
    :rtype: tuple of ints
    :raises: SerializationError if unable to unpack
    """
    try:
        (short,) = struct.unpack_from(">H", source, offset)
        return short, offset + struct.calcsize(">H")
    except struct.error:
        raise SerializationError("Bad format of serialized context.")
0.001842
def get_quotation_kline(self, symbol, period, limit=None, _async=False):
    """
    Get the ETF net value (kline data).
    :param symbol: ETF name
    :param period: kline period type
    :param limit: number of records to fetch
    :param _async:
    :return:
    """
    params = {}
    path = '/quotation/market/history/kline'
    params['symbol'] = symbol
    params['period'] = period
    if limit:
        params['limit'] = limit

    return api_key_get(params, path, _async=_async)
0.004132
def show_plot(plot, width=PREVIEW_WIDTH, height=PREVIEW_HEIGHT):
    """Preview a plot in a jupyter notebook.

    Args:
        plot (list): the plot to display (list of layers)
        width (int): the width of the preview
        height (int): the height of the preview

    Returns:
        An object that renders in Jupyter as the provided plot
    """
    return SVG(data=plot_to_svg(plot, width, height))
0.004819
def create(self, healthCheckNotification, instance, ipAddressResourceId, name, notificationContacts, rules, loadBalancerClassOfServiceID=1, *args, **kwargs): """ :type healthCheckNotification: bool :type instance: list[Instance] :type ipAddressResourceId: list[int] :type loadBalancerClassOfServiceID: int :type name: str :type notificationContacts: NotificationContacts or list[NotificationContact] :type rules: Rules :param healthCheckNotification: Enable or disable notifications :param instance: List of balanced IP Addresses (VM or server) :param ipAddressResourceId: ID of the IP Address resource of the Load Balancer :param loadBalancerClassOfServiceID: default 1 :param name: Name of the Load Balancer :param notificationContacts: Nullable if notificationContacts is false :param rules: List of NewLoadBalancerRule object containing the list of rules to be configured with the service """ response = self._call(method=SetEnqueueLoadBalancerCreation, healthCheckNotification=healthCheckNotification, instance=instance, ipAddressResourceId=ipAddressResourceId, name=name, notificationContacts=notificationContacts, rules=rules, loadBalancerClassOfServiceID=loadBalancerClassOfServiceID, *args, **kwargs)
0.005
def _expectation(p, rbf_kern, feat1, lin_kern, feat2, nghp=None): """ Compute the expectation: expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n) - K_lin_{.,.} :: RBF kernel - K_rbf_{.,.} :: Linear kernel Different Z1 and Z2 are handled if p is diagonal and K_lin and K_rbf have disjoint active_dims, in which case the joint expectations simplify into a product of expectations :return: NxM1xM2 """ if rbf_kern.on_separate_dims(lin_kern) and isinstance(p, DiagonalGaussian): # no joint expectations required eKxz1 = expectation(p, (rbf_kern, feat1)) eKxz2 = expectation(p, (lin_kern, feat2)) return eKxz1[:, :, None] * eKxz2[:, None, :] if feat1 != feat2: raise NotImplementedError("Features have to be the same for both kernels.") if rbf_kern.active_dims != lin_kern.active_dims: raise NotImplementedError("active_dims have to be the same for both kernels.") with params_as_tensors_for(rbf_kern, lin_kern, feat1, feat2): # use only active dimensions Xcov = rbf_kern._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov) Z, Xmu = rbf_kern._slice(feat1.Z, p.mu) N = tf.shape(Xmu)[0] D = tf.shape(Xmu)[1] lin_kern_variances = lin_kern.variance if lin_kern.ARD \ else tf.zeros((D,), dtype=settings.float_type) + lin_kern.variance rbf_kern_lengthscales = rbf_kern.lengthscales if rbf_kern.ARD \ else tf.zeros((D,), dtype=settings.float_type) + rbf_kern.lengthscales ## Begin RBF eKxz code: chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(rbf_kern_lengthscales ** 2) + Xcov) # NxDxD Z_transpose = tf.transpose(Z) all_diffs = Z_transpose - tf.expand_dims(Xmu, 2) # NxDxM exponent_mahalanobis = tf.matrix_triangular_solve(chol_L_plus_Xcov, all_diffs, lower=True) # NxDxM exponent_mahalanobis = tf.reduce_sum(tf.square(exponent_mahalanobis), 1) # NxM exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis) # NxM sqrt_det_L = tf.reduce_prod(rbf_kern_lengthscales) sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1)) determinants = sqrt_det_L / sqrt_det_L_plus_Xcov # N eKxz_rbf = rbf_kern.variance * (determinants[:, None] * exponent_mahalanobis) ## NxM <- End RBF eKxz code tiled_Z = tf.tile(tf.expand_dims(Z_transpose, 0), (N, 1, 1)) # NxDxM z_L_inv_Xcov = tf.matmul(tiled_Z, Xcov / rbf_kern_lengthscales[:, None] ** 2., transpose_a=True) # NxMxD cross_eKzxKxz = tf.cholesky_solve( chol_L_plus_Xcov, (lin_kern_variances * rbf_kern_lengthscales ** 2.)[..., None] * tiled_Z) # NxDxM cross_eKzxKxz = tf.matmul((z_L_inv_Xcov + Xmu[:, None, :]) * eKxz_rbf[..., None], cross_eKzxKxz) # NxMxM return cross_eKzxKxz
0.006177
def make_long_description(): """ Generate the reST long_description for setup() from source files. Returns the generated long_description as a unicode string. """ readme_path = README_PATH # Remove our HTML comments because PyPI does not allow it. # See the setup.py docstring for more info on this. readme_md = strip_html_comments(read(readme_path)) history_md = strip_html_comments(read(HISTORY_PATH)) license_md = """\ License ======= """ + read(LICENSE_PATH) sections = [readme_md, history_md, license_md] md_description = '\n\n'.join(sections) # Write the combined Markdown file to a temp path. md_ext = os.path.splitext(readme_path)[1] md_description_path = make_temp_path(RST_DESCRIPTION_PATH, new_ext=md_ext) write(md_description, md_description_path) rst_temp_path = make_temp_path(RST_DESCRIPTION_PATH) long_description = convert_md_to_rst(md_path=md_description_path, rst_temp_path=rst_temp_path) return "\n".join([RST_LONG_DESCRIPTION_INTRO, long_description])
0.000912
def copy_assets(self, assets_path):
    """Banana banana
    """
    if not os.path.exists(assets_path):
        os.mkdir(assets_path)

    extra_files = self._get_extra_files()

    for ex_files in Formatter.get_extra_files_signal(self):
        extra_files.extend(ex_files)

    for src, dest in extra_files:
        dest = os.path.join(assets_path, dest)

        destdir = os.path.dirname(dest)
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        if os.path.isfile(src):
            shutil.copy(src, dest)
        elif os.path.isdir(src):
            recursive_overwrite(src, dest)
0.00295
def __collapse_stranded(s, proc_strands, names=False, verbose=False): """ Get the union of a set of genomic intervals. given a list of genomic intervals with chromosome, start, end and strand fields, collapse those intervals with strand in the set <proc_strands> into a set of non-overlapping intervals. Other intervals are ignored. Intervals must be sorted by chromosome and then start coordinate. :note: O(n) time, O(n) space :return: list of intervals that define the collapsed regions. Note that these are all new objects, no existing object from s is returned or altered. Returned regions will all have name "X" and score 0 :param s: list of genomic regions to collapse :param proc_strands: set of acceptable strands; ignore input intervals with strand not found in this set. :param names: if True, accumulate region names. If false, all output regions have name "X" :param verbose: if True, output progress message to stderr. :raise GenomicIntervalError: if the input regions are not correctly sorted (chromosome then start) """ def get_first_matching_index(s, proc_strands): for i in range(0, len(s)): if s[i].strand in proc_strands: return i return None if proc_strands not in [set("+"), set("-"), set(["+", "-"])]: raise GenomicIntervalError("failed collapsing intervals on strands '" + ",".join(proc_strands) + "''; unrecognised " + "strand symbols") first_index = get_first_matching_index(s, proc_strands) if first_index is None: return [] res = [] current = copy.copy(s[first_index]) current.strand = '+' if (proc_strands == set("+") or proc_strands == set(["+", "-"])) else '-' current.score = 0 current.name = "X" if not names else set(s[first_index].name) for i in range(first_index + 1, len(s)): if s[i].strand not in proc_strands: continue # make sure things are sorted.. if (s[i].chrom < s[i - 1].chrom) or \ (s[i].chrom == s[i - 1].chrom and s[i].start < s[i - 1].start): raise GenomicIntervalError("collapsing regions failed. saw this " + "region: " + str(s[i - 1]) + " before this " + "one: " + str(s[i])) # because of sorting order, we know that nothing else exists with # start less than s[i] which we haven't already seen. if s[i].start > current.end or s[i].chrom != current.chrom: if names: current.name = ";".join(current.name) res.append(current) current = copy.copy(s[i]) current.strand = '+' if (proc_strands == set("+") or proc_strands == set(["+", "-"])) else '-' current.score = 0 current.name = "X" if not names else set(s[i].name) else: current.end = max(s[i].end, current.end) if names: current.name.add(s[i].name) # don't forget the last one... if names: current.name = ";".join(current.name) res.append(current) return res
0.008488
def flexion(self, x, y, kwargs, diff=0.000001):
    """
    third derivatives (flexion)

    :param x: x-position (preferentially arcsec)
    :type x: numpy array
    :param y: y-position (preferentially arcsec)
    :type y: numpy array
    :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
    :param diff: numerical differential length of Hessian
    :return: f_xxx, f_xxy, f_xyy, f_yyy
    """
    f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs)

    f_xx_dx, f_xy_dx, f_yx_dx, f_yy_dx = self.hessian(x + diff, y, kwargs)
    f_xx_dy, f_xy_dy, f_yx_dy, f_yy_dy = self.hessian(x, y + diff, kwargs)

    f_xxx = (f_xx_dx - f_xx) / diff
    f_xxy = (f_xx_dy - f_xx) / diff
    f_xyy = (f_xy_dy - f_xy) / diff
    f_yyy = (f_yy_dy - f_yy) / diff
    return f_xxx, f_xxy, f_xyy, f_yyy
0.003311
def _secured_storage_parameters(self):
    """
    Updates storage parameters with unsecure mode.

    Returns:
        dict: Updated storage_parameters.
    """
    parameters = self._storage_parameters or dict()

    # Handles unsecure mode
    if self._unsecure:
        parameters = parameters.copy()
        parameters['protocol'] = 'http'

    return parameters
0.004866
def nmap(nmap_args, ips):
    """
    Start an nmap process with the given args on the given ips.
    """
    config = Config()
    arguments = ['nmap', '-Pn']
    arguments.extend(ips)
    arguments.extend(nmap_args)
    output_file = ''
    now = datetime.datetime.now()
    if not '-oA' in nmap_args:
        output_name = 'nmap_jackal_{}'.format(now.strftime("%Y-%m-%d %H:%M"))
        path_name = os.path.join(config.get('nmap', 'directory'), output_name)
        print_notification("Writing output of nmap to {}".format(path_name))
        if not os.path.exists(config.get('nmap', 'directory')):
            os.makedirs(config.get('nmap', 'directory'))
        output_file = path_name + '.xml'
        arguments.extend(['-oA', path_name])
    else:
        output_file = nmap_args[nmap_args.index('-oA') + 1] + '.xml'

    print_notification("Starting nmap")
    subprocess.call(arguments)

    with open(output_file, 'r') as f:
        return f.read()
0.002079
def resolve_group_names(self, r, target_group_ids, groups): """Resolve any security group names to the corresponding group ids With the context of a given network attached resource. """ names = self.get_group_names(target_group_ids) if not names: return target_group_ids target_group_ids = list(target_group_ids) vpc_id = self.vpc_expr.search(r) if not vpc_id: raise PolicyExecutionError(self._format_error( "policy:{policy} non vpc attached resource used " "with modify-security-group: {resource_id}", resource_id=r[self.manager.resource_type.id])) found = False for n in names: for g in groups: if g['GroupName'] == n and g['VpcId'] == vpc_id: found = g['GroupId'] if not found: raise PolicyExecutionError(self._format_error(( "policy:{policy} could not resolve sg:{name} for " "resource:{resource_id} in vpc:{vpc}"), name=n, resource_id=r[self.manager.resource_type.id], vpc=vpc_id)) target_group_ids.remove(n) target_group_ids.append(found) return target_group_ids
0.001526
def encode_basic_auth(username, password):
    """
    Encode basic auth credentials.
    """
    return "Basic {}".format(
        b64encode(
            "{}:{}".format(
                username,
                password,
            ).encode("utf-8")
        ).decode("utf-8")
    )
0.003509
def series(self, sid, recycle_id=None, head=None, tail=None, datetime=True): """ Create data Series Parameters ---------- sid : str recycle_id : optional head : int | pandas.Timestamp, optional Start of the interval default earliest available tail : int | pandas.Timestamp, optional End of the interval default max epoch datetime : bool convert index to datetime default True Returns ------- pandas.Series """ if head is None: head = 0 else: head = self._2epochs(head) if tail is None: tail = EPOCHS_MAX else: tail = self._2epochs(tail) if recycle_id is None: self.dbcur.execute(SQL_TMPO_RID_MAX, (sid,)) recycle_id = self.dbcur.fetchone()[0] tlist = self.list(sid)[0] srlist = [] for _sid, rid, lvl, bid, ext, ctd, blk in tlist: if (recycle_id == rid and head < self._blocktail(lvl, bid) and tail >= bid): srlist.append(self._blk2series(ext, blk, head, tail)) if len(srlist) > 0: ts = pd.concat(srlist) ts.name = sid if datetime is True: ts.index = pd.to_datetime(ts.index, unit="s", utc=True) return ts else: return pd.Series([], name=sid)
0.003298
def calculate_dates(self, dt):
    """
    Given a date, find that day's open and period end (open + offset).
    """
    period_start, period_close = self.cal.open_and_close_for_session(
        self.cal.minute_to_session_label(dt),
    )

    # Align the market open and close times here with the execution times
    # used by the simulation clock. This ensures that scheduled functions
    # trigger at the correct times.
    self._period_start = self.cal.execution_time_from_open(period_start)
    self._period_close = self.cal.execution_time_from_close(period_close)

    self._period_end = self._period_start + self.offset - self._one_minute
0.002878
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
    """
    use the info smoother to solve a symmetric block tridiagonal system
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))

    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    J_node = H_diag
    h_node = np.zeros((T, D))

    _, _, sigmas, E_xt_xtp1 = \
        info_E_step(J_init, h_init, 0,
                    J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
                    J_node, h_node, np.zeros(T))
    return sigmas, E_xt_xtp1
0.001479
def send(self, message, message_type, topic=''):
    """
    Send the message on the socket.

    Args:
        - message: the message to publish
        - message_type: the type of message being sent
        - topic: the topic on which to send the message. Defaults to ''.
    """
    if message_type == RAW:
        self._sock.send(message)
    elif message_type == PYOBJ:
        self._sock.send_pyobj(message)
    elif message_type == JSON:
        self._sock.send_json(message)
    elif message_type == MULTIPART:
        self._sock.send_multipart([topic, message])
    elif message_type == STRING:
        self._sock.send_string(message)
    elif message_type == UNICODE:
        self._sock.send_unicode(message)
    else:
        raise Exception("Unknown message type %s" % (message_type,))
0.007919
def can_create_asset_content(self, asset_id=None): """Tests if this user can create content for ``Assets``. A return of true does not guarantee successful authorization. A return of false indicates that it is known creating an ``Asset`` will result in a ``PermissionDenied``. This is intended as a hint to an application that may opt not to offer create operations to an unauthorized user. :param asset_id: the ``Id`` of an ``Asset`` :type asset_id: ``osid.id.Id`` :return: ``false`` if ``Asset`` content ceration is not authorized, ``true`` otherwise :rtype: ``boolean`` :raise: ``NullArgument`` -- ``asset_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ url_path = construct_url('authorization', bank_id=self._catalog_idstr) return self._get_request(url_path)['assetHints']['canCreate']
0.003067
def upgrade(): """Upgrade database.""" op.create_table( 'crawler_job', sa.Column('id', sa.Integer, primary_key=True, autoincrement=True), sa.Column('job_id', UUIDType, index=True), sa.Column('spider', sa.String(255), index=True), sa.Column('workflow', sa.String(255), index=True), sa.Column('results', sa.Text, nullable=True), sa.Column( 'status', ChoiceType(JobStatus, impl=sa.String(10)), nullable=False ), sa.Column('logs', sa.Text, nullable=True), sa.Column( 'scheduled', sa.DateTime, default=datetime.now, nullable=False, index=True ) ) op.create_table( 'crawler_workflows_object', sa.Column('job_id', UUIDType, primary_key=True), sa.Column( 'object_id', sa.Integer, sa.ForeignKey( 'workflows_object.id', ondelete="CASCADE", onupdate="CASCADE", ), primary_key=True ) )
0.000894
def logical_or_expr(self):
    """
    logical_or_expr: logical_and_expr ('or' logical_and_expr)*
    """
    node = self.logical_and_expr()

    while self.token.nature == Nature.OR:
        token = self.token
        self._process(Nature.OR)
        node = BinaryOperation(left=node, op=token, right=self.logical_and_expr())

    return node
0.007895
def draw(self):
    """
    Render and draw the world and robots.
    """
    from calysto.display import display, clear_output
    canvas = self.render()
    clear_output(wait=True)
    display(canvas)
0.008696
def delete_namespaced_horizontal_pod_autoscaler(self, name, namespace, **kwargs): """ delete a HorizontalPodAutoscaler This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_horizontal_pod_autoscaler(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the HorizontalPodAutoscaler (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, **kwargs) return data
0.00412
def last(iterable, default=None):
    """Try to get the last iterable item by successive iteration on it.

    :param Iterable iterable: iterable to iterate on. Must provide the method
        __iter__.
    :param default: default value to get if input iterable is empty.
    :raises TypeError: if iterable is not an iterable value.

    :Example:

    >>> last('tests')
    's'
    >>> last('', default='test')
    'test'
    >>> last([])
    None"""
    result = default

    iterator = iter(iterable)

    while True:
        try:
            result = next(iterator)
        except StopIteration:
            break

    return result
0.001567
def handle(self, signum, handler):
    """Set a function to run when the given signal is received.

    Multiple handlers may be assigned to a single signal. The order of
    handlers does not need to be preserved.

    'signum' must be an integer representing a signal.
    'handler' must be a callable.
    """
    if not isinstance(signum, int):
        raise TypeError(
            "Signals must be given as integers. Got {0}.".format(
                type(signum),
            ),
        )

    if not callable(handler):
        raise TypeError(
            "Signal handlers must be callable.",
        )

    signal.signal(signum, self._handle_signals)
    self._handlers[signum].append(handler)
0.002577
def transform_aglistener_output(result): ''' Transforms the result of Availability Group Listener to eliminate unnecessary parameters. ''' from collections import OrderedDict from msrestazure.tools import parse_resource_id try: resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group'] # Create a dictionary with the relevant parameters output = OrderedDict([('id', result.id), ('name', result.name), ('provisioningState', result.provisioning_state), ('port', result.port), ('resourceGroup', resource_group)]) # Note, wsfcDomainCredentials will not display if result.load_balancer_configurations is not None: output['loadBalancerConfigurations'] = format_load_balancer_configuration_list(result.load_balancer_configurations) return output except AttributeError: # Return the response object if the formating fails return result
0.00363
def read_rels(archive):
    """Read relationships for a workbook"""
    xml_source = archive.read(ARC_WORKBOOK_RELS)
    tree = fromstring(xml_source)
    for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
        rId = element.get('Id')
        pth = element.get("Target")
        typ = element.get('Type')
        # normalise path
        if pth.startswith("/xl"):
            pth = pth.replace("/xl", "xl")
        elif not pth.startswith("xl") and not pth.startswith(".."):
            pth = "xl/" + pth
        yield rId, {'path': pth, 'type': typ}
0.005272
def apply_operation_to(self, path):
    """Add `a:lnTo` element to *path* for this line segment.

    Returns the `a:lnTo` element newly added to the path.
    """
    return path.add_lnTo(
        self._x - self._freeform_builder.shape_offset_x,
        self._y - self._freeform_builder.shape_offset_y
    )
0.005952
def full_exec_request_actions(actions, func=None, render_func=None):
    """Full process to execute before, during and after actions.

    If func is specified, it will be called after exec_request_actions()
    unless a ContextExitException was raised. If render_func is specified,
    it will be called after exec_request_actions() only if there is no
    response. exec_after_request_actions() is always called.
    """
    response = None
    try:
        exec_before_request_actions(actions, catch_context_exit=False)
        exec_request_actions(actions, catch_context_exit=False)
        if func:
            response = func()
    except ContextExitException as e:
        response = e.result
    except ReturnValueException as e:
        response = e.value
    if render_func and response is None:
        response = render_func()
    return exec_after_request_actions(actions, response)
0.001119
def dispatch(self, block = False, timeout = None): """Get the next event from the queue and pass it to the appropriate handlers. :Parameters: - `block`: wait for event if the queue is empty - `timeout`: maximum time, in seconds, to wait if `block` is `True` :Types: - `block`: `bool` - `timeout`: `float` :Return: the event handled (may be `QUIT`) or `None` """ logger.debug(" dispatching...") try: event = self.queue.get(block, timeout) except Queue.Empty: logger.debug(" queue empty") return None try: logger.debug(" event: {0!r}".format(event)) if event is QUIT: return QUIT handlers = list(self._handler_map[None]) klass = event.__class__ if klass in self._handler_map: handlers += self._handler_map[klass] logger.debug(" handlers: {0!r}".format(handlers)) # to restore the original order of handler objects handlers.sort(key = lambda x: x[0]) for dummy, handler in handlers: logger.debug(u" passing the event to: {0!r}".format(handler)) result = handler(event) if isinstance(result, Event): self.queue.put(result) elif result and event is not QUIT: return event return event finally: self.queue.task_done()
0.005118
def vcfunpackinfo(table, *keys): """ Unpack the INFO field into separate fields. E.g.:: >>> import petl as etl >>> # activate bio extensions ... import petlx.bio >>> table1 = ( ... etl ... .fromvcf('fixture/sample.vcf', samples=None) ... .vcfunpackinfo() ... ) >>> table1 +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | CHROM | POS | ID | REF | ALT | QUAL | FILTER | AA | AC | AF | AN | DB | DP | H2 | NS | +=======+=========+=============+=====+========+======+=========+======+======+================+======+======+======+======+======+ | '19' | 111 | None | 'A' | [C] | 9.6 | None | None | None | None | None | None | None | None | None | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | '19' | 112 | None | 'A' | [G] | 10 | None | None | None | None | None | None | None | None | None | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | '20' | 14370 | 'rs6054257' | 'G' | [A] | 29 | [] | None | None | [0.5] | None | True | 14 | True | 3 | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | '20' | 17330 | None | 'T' | [A] | 3 | ['q10'] | None | None | [0.017] | None | None | 11 | None | 3 | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ | '20' | 1110696 | 'rs6040355' | 'A' | [G, T] | 67 | [] | 'T' | None | [0.333, 0.667] | None | True | 10 | None | 2 | +-------+---------+-------------+-----+--------+------+---------+------+------+----------------+------+------+------+------+------+ ... """ result = etl.unpackdict(table, 'INFO', keys=keys) return result
0.003073
def _status(self): """Update the resource dictionary with job statuses.""" job_id_list = ' '.join(self.resources.keys()) cmd = "condor_q {0} -af:jr JobStatus".format(job_id_list) retcode, stdout, stderr = super().execute_wait(cmd) """ Example output: $ condor_q 34524642.0 34524643.0 -af:jr JobStatus 34524642.0 2 34524643.0 1 """ for line in stdout.strip().split('\n'): parts = line.split() job_id = parts[0] status = translate_table.get(parts[1], 'UNKNOWN') self.resources[job_id]['status'] = status
0.003125
def journal_history(self):
    """List of named tuples of authored publications in the form
    (sourcetitle, abbreviation, type, issn).  issn is only given
    for journals.  abbreviation and issn may be None.
    """
    jour = namedtuple('Journal', 'sourcetitle abbreviation type issn')
    path = ['author-profile', 'journal-history', 'journal']
    hist = [jour(sourcetitle=pub['sourcetitle'], issn=pub.get('issn'),
                 abbreviation=pub.get('sourcetitle-abbrev'),
                 type=pub['@type'])
            for pub in listify(chained_get(self._json, path, []))]
    return hist or None
0.003067
def condense_duplicates(self): """Condense duplicate points using a transformation matrix. This is useful if you have multiple non-transformed points at the same location or multiple transformed points that use the same quadrature points. Won't change the GP if all of the rows of [X, n] are unique. Will create a transformation matrix T if necessary. Note that the order of the points in [X, n] will be arbitrary after this operation. If there are any transformed quantities (i.e., `self.T` is not None), it will also remove any quadrature points for which all of the weights are zero (even if all of the rows of [X, n] are unique). """ unique, inv = unique_rows( scipy.hstack((self.X, self.n)), return_inverse=True ) # Only proceed if there is anything to be gained: if len(unique) != len(self.X): if self.T is None: self.T = scipy.eye(len(self.y)) new_T = scipy.zeros((len(self.y), unique.shape[0])) for j in xrange(0, len(inv)): new_T[:, inv[j]] += self.T[:, j] self.T = new_T self.n = unique[:, self.X.shape[1]:] self.X = unique[:, :self.X.shape[1]] # Also remove any points which don't enter into the calculation: if self.T is not None: # Find the columns of T which actually enter in: # Recall that T is (n, n_Q), X is (n_Q, n_dim). good_cols = (self.T != 0.0).any(axis=0) self.T = self.T[:, good_cols] self.X = self.X[good_cols, :] self.n = self.n[good_cols, :]
0.004042
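A minimal standalone sketch of the condensation step above, using numpy.unique in place of the class internals; the arrays are made up purely for illustration.

import numpy as np

# Four observations, two of which sit at the same input location.
x = np.array([0.0, 1.0, 0.0, 2.0])
T = np.eye(len(x))  # start from the identity transformation matrix

# Unique locations plus the map from each original point to its unique one.
unique, inv = np.unique(x, return_inverse=True)

# Sum together the columns of T that land on the same unique location.
new_T = np.zeros((len(x), len(unique)))
for j, target in enumerate(inv):
    new_T[:, target] += T[:, j]

print(unique)  # [0. 1. 2.]
print(new_T)   # 4x3 matrix mapping the 3 unique points back to the 4 observations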
def clear(self): """Clear output of one climate variable """ # mark this task as incomplete self.mark_incomplete() # Delete the indicator metadata, this also deletes values by cascading. for suffix in list(CLIMATE_SEASON_SUFFIXES.values()): try: # noinspection PyUnresolvedReferences indicator = self.session.query(models.ClimateIndicator) \ .filter(models.ClimateIndicator.description == self.description + suffix) \ .one() self.session.delete(indicator) except NoResultFound: # Data didn't exist yet, no problem pass self.close_session()
0.004049
def get_frames(self): """Rectify and return current frames from cameras.""" frames = super(CalibratedPair, self).get_frames() return self.calibration.rectify(frames)
0.010582
def compress_file(filepath, compression="gz"): """ Compresses a file with the correct extension. Functions like standard Unix command line gzip and bzip2 in the sense that the original uncompressed files are not retained. Args: filepath (str): Path to file. compression (str): A compression mode. Valid options are "gz" or "bz2". Defaults to "gz". """ if compression not in ["gz", "bz2"]: raise ValueError("Supported compression formats are 'gz' and 'bz2'.") from monty.io import zopen if not filepath.lower().endswith(".%s" % compression): with open(filepath, 'rb') as f_in, \ zopen('%s.%s' % (filepath, compression), 'wb') as f_out: f_out.writelines(f_in) os.remove(filepath)
0.001259
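A small usage sketch; the file is a throwaway created just for the demonstration, and only data.txt.gz remains afterwards.

# Create a throwaway file, then compress it in place.
with open('data.txt', 'w') as f:
    f.write('hello world\n')

compress_file('data.txt')  # produces data.txt.gz and removes data.txt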
def peers(**kwargs): # pylint: disable=unused-argument ''' Returns a list the NTP peers configured on the network device. :return: configured NTP peers as list. CLI Example: .. code-block:: bash salt '*' ntp.peers Example output: .. code-block:: python [ '192.168.0.1', '172.17.17.1', '172.17.17.2', '2400:cb00:6:1024::c71b:840a' ] ''' ntp_peers = salt.utils.napalm.call( napalm_device, # pylint: disable=undefined-variable 'get_ntp_peers', **{ } ) if not ntp_peers.get('result'): return ntp_peers ntp_peers_list = list(ntp_peers.get('out', {}).keys()) ntp_peers['out'] = ntp_peers_list return ntp_peers
0.001271
def get_vulnerabilities(self, teams=None, applications=None, channel_types=None, start_date=None, end_date=None, generic_severities=None, generic_vulnerabilities=None, number_merged=None, number_vulnerabilities=None, parameter=None, path=None, show_open=None, show_closed=None, show_defect_open=None, show_defect_closed=None, show_defect_present=None, show_defect_not_present=None, show_false_positive=None, show_hidden=None): """ Returns filtered list of vulnerabilities. :param teams: List of team ids. :param applications: List of application ids. :param channel_types: List of scanner names. :param start_date: Lower bound on scan dates. :param end_date: Upper bound on scan dates. :param generic_severities: List of generic severity values. :param generic_vulnerabilities: List of generic vulnerability ids. :param number_merged: Number of vulnerabilities merged from different scans. :param number_vulnerabilities: Number of vulnerabilities to return. :param parameter: Application input that the vulnerability affects. :param path: Path to the web page where the vulnerability was found. :param show_open: Flag to show all open vulnerabilities. :param show_closed: Flag to show all closed vulnerabilities. :param show_defect_open: Flag to show any vulnerabilities with open defects. :param show_defect_closed: Flag to show any vulnerabilities with closed defects. :param show_defect_present: Flag to show any vulnerabilities with a defect. :param show_defect_not_present: Flag to show any vulnerabilities without a defect. :param show_false_positive: Flag to show any false positives from vulnerabilities. :param show_hidden: Flag to show all hidden vulnerabilities. """ params = {} # Build parameter list if teams: params.update(self._build_list_params('teams', 'id', teams)) if applications: params.update(self._build_list_params('applications', 'id', applications)) if channel_types: params.update(self._build_list_params('channelTypes', 'name', channel_types)) if start_date: params['startDate'] = start_date if end_date: params['endDate'] = end_date if generic_severities: params.update(self._build_list_params('genericSeverities', 'intValue', generic_severities)) if generic_vulnerabilities: params.update(self._build_list_params('genericVulnerabilities', 'id', generic_vulnerabilities)) if number_merged: params['numberMerged'] = number_merged if number_vulnerabilities: params['numberVulnerabilities'] = number_vulnerabilities if parameter: params['parameter'] = parameter if path: params['path'] = path if show_open: params['showOpen'] = show_open if show_closed: params['showClosed'] = show_closed if show_defect_open: params['showDefectOpen'] = show_defect_open if show_defect_closed: params['showDefectClosed'] = show_defect_closed if show_defect_present: params['showDefectPresent'] = show_defect_present if show_defect_not_present: params['showDefectNotPresent'] = show_defect_not_present if show_false_positive: params['showFalsePositive'] = show_false_positive if show_hidden: params['showHidden'] = show_hidden return self._request('POST', 'rest/vulnerabilities', params)
0.005542
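A hedged usage sketch; `client` stands for an instance of whatever API wrapper class defines get_vulnerabilities(), and the ids and limit are illustrative only.

def list_open_vulns(client, team_ids):
    """Fetch up to 25 open vulnerabilities for the given teams."""
    # `client` is assumed to expose the get_vulnerabilities() method above.
    return client.get_vulnerabilities(
        teams=team_ids,
        show_open=True,
        number_vulnerabilities=25,
    )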
def Delete(self, n = 1, dl = 0):
        """Press the delete key n times.
        """
        self.Delay(dl)
        self.keyboard.tap_key(self.keyboard.delete_key, n)
0.041958
def _get_sysv_services(root, systemd_services=None): ''' Use os.listdir() and os.access() to get all the initscripts ''' initscript_path = _root(INITSCRIPT_PATH, root) try: sysv_services = os.listdir(initscript_path) except OSError as exc: if exc.errno == errno.ENOENT: pass elif exc.errno == errno.EACCES: log.error( 'Unable to check sysvinit scripts, permission denied to %s', initscript_path ) else: log.error( 'Error %d encountered trying to check sysvinit scripts: %s', exc.errno, exc.strerror ) return [] if systemd_services is None: systemd_services = _get_systemd_services(root) ret = [] for sysv_service in sysv_services: if os.access(os.path.join(initscript_path, sysv_service), os.X_OK): if sysv_service in systemd_services: log.debug( 'sysvinit script \'%s\' found, but systemd unit ' '\'%s.service\' already exists', sysv_service, sysv_service ) continue ret.append(sysv_service) return ret
0.000786
def add_coord(self, x, y): """ Adds a coord to the polyline and creates another circle """ x = x*self.x_factor y = y*self.y_factor self.plotData.add_coord(x, y) self.circles_list.append(gui.SvgCircle(x, y, self.circle_radius)) self.append(self.circles_list[-1]) if len(self.circles_list) > self.maxlen: self.remove_child(self.circles_list[0]) del self.circles_list[0]
0.004396
def sphere_analytical_gaussian(dr, a, alpha=0.2765):
    """
    Analytically calculate the sphere's functional form by convolving the
    Heaviside function with a first-order approximation to the sinc, a Gaussian.
    The alpha parameter controls the width of the approximation -- it should be
    1, but is fit to be roughly 0.2765
    """
    term1 = 0.5*(erf((dr+2*a)/(alpha*np.sqrt(2))) + erf(-dr/(alpha*np.sqrt(2))))
    term2 = np.sqrt(0.5/np.pi)*(alpha/(dr+a+1e-10)) * (
                np.exp(-0.5*dr**2/alpha**2) - np.exp(-0.5*(dr+2*a)**2/alpha**2)
            )
    return term1 - term2
0.003367
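A quick numeric check, assuming the function above and its numpy/scipy imports are in scope: the profile should sit near 1 well inside the surface (negative dr), near 0.5 at the surface, and near 0 well outside.

import numpy as np

a = 5.0                          # sphere radius
dr = np.array([-4.0, 0.0, 4.0])  # signed distance from the surface

print(sphere_analytical_gaussian(dr, a))  # roughly [1.0, 0.48, 0.0]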
def index_data(self, data, index_name, doc_type):
        """Take an arbitrary dictionary of data and index it with ELS.

        Args:
            data: data to be indexed. Should be a dictionary.
            index_name: Name of the index.
            doc_type: The type of the document.

        Raises:
            RuntimeError: When the indexing fails.
        """

        # Index the data (which needs to be a dict/object); if it's not,
        # raise an exception
        if not isinstance(data, dict):
            raise RuntimeError('Index failed, data needs to be a dict!')

        try:
            self.els_search.index(index=index_name, doc_type=doc_type, body=data)
        except Exception as error:
            print('Index failed: %s' % str(error))
            raise RuntimeError('Index failed: %s' % str(error))
0.00358
def create_user(uid=None, username=None, password=None, priv=None): ''' Create a CIMC user with username and password. Args: uid(int): The user ID slot to create the user account in. username(str): The name of the user. password(str): The clear text password of the user. priv(str): The privilege level of the user. CLI Example: .. code-block:: bash salt '*' cimc.create_user 11 username=admin password=foobar priv=admin ''' if not uid: raise salt.exceptions.CommandExecutionError("The user ID must be specified.") if not username: raise salt.exceptions.CommandExecutionError("The username must be specified.") if not password: raise salt.exceptions.CommandExecutionError("The password must be specified.") if not priv: raise salt.exceptions.CommandExecutionError("The privilege level must be specified.") dn = "sys/user-ext/user-{0}".format(uid) inconfig = """<aaaUser id="{0}" accountStatus="active" name="{1}" priv="{2}" pwd="{3}" dn="sys/user-ext/user-{0}"/>""".format(uid, username, priv, password) ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) return ret
0.004323
def _fmix(self, T, rho, A): r"""Specific Helmholtz energy of air-water interaction Parameters ---------- T : float Temperature, [K] rho : float Density, [kg/m³] A : float Mass fraction of dry air in humid air, [kg/kg] Returns ------- prop : dict Dictionary with helmholtz energy and derivatives: * fir, [kJ/kg] * fira: :math:`\left.\frac{\partial f_{mix}}{\partial A}\right|_{T,\rho}`, [kJ/kg] * firt: :math:`\left.\frac{\partial f_{mix}}{\partial T}\right|_{A,\rho}`, [kJ/kgK] * fird: :math:`\left.\frac{\partial f_{mix}}{\partial \rho}\right|_{A,T}`, [kJ/m³kg²] * firaa: :math:`\left.\frac{\partial^2 f_{mix}}{\partial A^2}\right|_{T, \rho}`, [kJ/kg] * firat: :math:`\left.\frac{\partial^2 f_{mix}}{\partial A \partial T}\right|_{\rho}`, [kJ/kgK] * firad: :math:`\left.\frac{\partial^2 f_{mix}}{\partial A \partial \rho}\right|_T`, [kJ/m³kg²] * firtt: :math:`\left.\frac{\partial^2 f_{mix}}{\partial T^2}\right|_{A, \rho}`, [kJ/kgK²] * firdt: :math:`\left.\frac{\partial^2 f_{mix}}{\partial \rho \partial T}\right|_A`, [kJ/m³kg²K] * firdd: :math:`\left.\frac{\partial^2 f_{mix}}{\partial \rho^2}\right|_{A, T}`, [kJ/m⁶kg³] References ---------- IAPWS, Guideline on an Equation of State for Humid Air in Contact with Seawater and Ice, Consistent with the IAPWS Formulation 2008 for the Thermodynamic Properties of Seawater, Table 10, http://www.iapws.org/relguide/SeaAir.html """ Ma = Air.M/1000 Mw = IAPWS95.M/1000 vir = _virial(T) Baw = vir["Baw"] Bawt = vir["Bawt"] Bawtt = vir["Bawtt"] Caaw = vir["Caaw"] Caawt = vir["Caawt"] Caawtt = vir["Caawtt"] Caww = vir["Caww"] Cawwt = vir["Cawwt"] Cawwtt = vir["Cawwtt"] # Eq T45 f = 2*A*(1-A)*rho*R*T/Ma/Mw*(Baw+3*rho/4*(A/Ma*Caaw+(1-A)/Mw*Caww)) # Eq T46 fa = 2*rho*R*T/Ma/Mw*((1-2*A)*Baw+3*rho/4*( A*(2-3*A)/Ma*Caaw+(1-A)*(1-3*A)/Mw*Caww)) # Eq T47 ft = 2*A*(1-A)*rho*R/Ma/Mw*( Baw+T*Bawt+3*rho/4*(A/Ma*(Caaw+T*Caawt)+(1-A)/Mw*(Caww+T*Cawwt))) # Eq T48 fd = A*(1-A)*R*T/Ma/Mw*(2*Baw+3*rho*(A/Ma*Caaw+(1-A)/Mw*Caww)) # Eq T49 faa = rho*R*T/Ma/Mw*(-4*Baw+3*rho*((1-3*A)/Ma*Caaw-(2-3*A)/Mw*Caww)) # Eq T50 fat = 2*rho*R/Ma/Mw*(1-2*A)*(Baw+T*Bawt)+3*rho**2*R/2/Ma/Mw*( A*(2-3*A)/Ma*(Caaw+T*Caawt)+(1-A)*(1-3*A)/Mw*(Caww+T*Cawwt)) # Eq T51 fad = 2*R*T/Ma/Mw*((1-2*A)*Baw+3/2*rho*( A*(2-3*A)/Ma*Caaw+(1-A)*(1-3*A)/Mw*Caww)) # Eq T52 ftt = 2*A*(1-A)*rho*R/Ma/Mw*(2*Bawt+T*Bawtt+3*rho/4*( A/Ma*(2*Caawt+T*Caawtt)+(1-A)/Mw*(2*Cawwt+T*Cawwtt))) # Eq T53 ftd = 2*A*(1-A)*R/Ma/Mw*(Baw+T*Bawt+3*rho/2*( A/Ma*(Caaw+T*Caawt)+(1-A)/Mw*(Caww+T*Cawwt))) # Eq T54 fdd = 3*A*(1-A)*R*T/Ma/Mw*(A/Ma*Caaw+(1-A)/Mw*Caww) prop = {} prop["fir"] = f/1000 prop["fira"] = fa/1000 prop["firt"] = ft/1000 prop["fird"] = fd/1000 prop["firaa"] = faa/1000 prop["firat"] = fat/1000 prop["firad"] = fad/1000 prop["firtt"] = ftt/1000 prop["firdt"] = ftd/1000 prop["firdd"] = fdd/1000 return prop
0.003115
def rfc2426(self): """RFC2426-encode the field content. :return: the field in the RFC 2426 format. :returntype: `str`""" return rfc2425encode("n",u';'.join(quote_semicolon(val) for val in (self.family,self.given,self.middle,self.prefix,self.suffix)))
0.026756
def is_running(process):
    '''
    Check if process is running.

    Check if the given process name is running or not.

    Note:
        On a Linux system, kernel threads (like ``kthreadd`` etc.)
        are excluded.

    Args:
        process (str): The name of the process.

    Returns:
        bool: Is the process running?
    '''
    if os.name == 'nt':
        process_list = get_cmd_out(['tasklist', '/v'])
        return process in process_list
    else:
        process_list = get_cmd_out('ps axw | awk \'{print $5}\'')
        for i in process_list.split('\n'):
            # 'COMMAND' is the column heading; names in [brackets] are
            # kernel-level processes like kthreadd, so skip both of them
            if i != 'COMMAND' and not i.startswith('['):
                if i == process:
                    return True
                elif os.path.basename(i) == process:
                    # check i without the executable path, for example if
                    # 'process' is 'sshd' and '/usr/bin/sshd' is listed in ps
                    return True
        return False
0.031283
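A short usage sketch, assuming the module defining is_running (and its get_cmd_out helper) is importable; 'sshd' is just an example process name.

if is_running('sshd'):
    print('sshd is up')
else:
    print('sshd does not appear to be running')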
def match_descendant_objective_id(self, objective_id=None, match=None): """Sets the objective ``Id`` for this query to match objectives that have the specified objective as a descendant. arg: objective_id (osid.id.Id): an objective ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``objective_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ if match: self._add_match('descendantObjectiveId', objective_id) else: raise errors.Unimplemented()
0.004615
def get_course(self, courseid): """ Return the course """ try: course = self.course_factory.get_course(courseid) except: raise web.notfound() return course
0.014151
def publish(self, message, exchange=None): """ Publish a :class:`fedora_messaging.message.Message` to an `exchange`_ on the message broker. This call will survive connection failures and try until it succeeds or is canceled. Args: message (message.Message): The message to publish. exchange (str): The name of the AMQP exchange to publish to; defaults to :ref:`conf-publish-exchange` returns: defer.Deferred: A deferred that fires when the message is published. Raises: PublishReturned: If the published message is rejected by the broker. ConnectionException: If a connection error occurs while publishing. Calling this method again will wait for the next connection and publish when it is available. .. _exchange: https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchanges """ exchange = exchange or config.conf["publish_exchange"] while True: client = yield self.whenConnected() try: yield client.publish(message, exchange) break except ConnectionException: continue
0.00716
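A hedged usage sketch from another Twisted coroutine; `factory` stands for whatever object exposes the publish method above, and the topic and body are placeholders.

from twisted.internet import defer
from fedora_messaging import message


@defer.inlineCallbacks
def announce(factory):
    # publish() keeps retrying across connection failures and may raise
    # PublishReturned if the broker rejects the message.
    msg = message.Message(topic="example.topic", body={"hello": "world"})
    yield factory.publish(msg, exchange="amq.topic")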
def assign_did(self, did):
        """
        Assign the DID to this key: if the key id or owner is only a
        '#fragment' value, prefix it with the DID to form a full identifier.
        """
        if re.match('^#.*', self._id):
            self._id = did + self._id
        if re.match('^#.*', self._owner):
            self._owner = did + self._owner
0.00578
def getTrustForJID(self, bare_jid): """ All-in-one trust information for all devices of a bare jid. The result is structured like this: { "active" : { device: int => trust_info } "inactive" : { device: int => trust_info } } where trust_info is the structure returned by getTrustForDevice. """ result = { "active" : {}, "inactive" : {} } devices = yield self.__loadActiveDevices(bare_jid) for device in devices: result["active"][device] = yield self.getTrustForDevice(bare_jid, device) devices = yield self.__loadInactiveDevices(bare_jid) for device in devices: result["inactive"][device] = yield self.getTrustForDevice(bare_jid, device) promise.returnValue(result)
0.006977
def graph_from_file(filename, bidirectional=False, simplify=True, retain_all=False, name='unnamed'): """ Create a networkx graph from OSM data in an XML file. Parameters ---------- filename : string the name of a file containing OSM XML data bidirectional : bool if True, create bidirectional edges for one-way streets simplify : bool if True, simplify the graph topology retain_all : bool if True, return the entire graph even if it is not connected name : string the name of the graph Returns ------- networkx multidigraph """ # transmogrify file of OSM XML data into JSON response_jsons = [overpass_json_from_file(filename)] # create graph using this response JSON G = create_graph(response_jsons, bidirectional=bidirectional, retain_all=retain_all, name=name) # simplify the graph topology as the last step. if simplify: G = simplify_graph(G) log('graph_from_file() returning graph with {:,} nodes and {:,} edges'.format(len(list(G.nodes())), len(list(G.edges())))) return G
0.00173
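A minimal usage sketch, assuming a local OSM XML extract; the filename is illustrative.

# Build a simplified street network graph from a local OSM XML extract.
G = graph_from_file('piedmont.osm', bidirectional=False, simplify=True,
                    retain_all=False, name='piedmont')
print(len(G.nodes()), len(G.edges()))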
def size(self, filename: str) -> int: '''Get size of file. Coroutine. ''' yield from self._control_stream.write_command(Command('SIZE', filename)) reply = yield from self._control_stream.read_reply() self.raise_if_not_match('File size', ReplyCodes.file_status, reply) try: return int(reply.text.strip()) except ValueError: return
0.007126
def open_url_in_browser(url, browsername=None, fallback=False): r""" Opens a url in the specified or default browser Args: url (str): web url CommandLine: python -m utool.util_grabdata --test-open_url_in_browser Example: >>> # DISABLE_DOCTEST >>> # SCRIPT >>> from utool.util_grabdata import * # NOQA >>> url = 'http://www.jrsoftware.org/isdl.php' >>> open_url_in_browser(url, 'chrome') """ import webbrowser print('[utool] Opening url=%r in browser' % (url,)) if browsername is None: browser = webbrowser.open(url) else: browser = get_prefered_browser(pref_list=[browsername], fallback=fallback) return browser.open(url)
0.002699
def batchseeds(args): """ %prog batchseeds folder Extract seed metrics for each image in a directory. """ from jcvi.formats.pdf import cat xargs = args[1:] p = OptionParser(batchseeds.__doc__) opts, args, iopts = add_seeds_options(p, args) if len(args) != 1: sys.exit(not p.print_help()) folder, = args folder = folder.rstrip('/') outdir = folder + "-debug" outfile = folder + "-output.tsv" assert op.isdir(folder) images = [] jsonfile = opts.calibrate or op.join(folder, "calibrate.json") if not op.exists(jsonfile): jsonfile = None for im in iglob(folder, "*.jpg,*.JPG,*.png"): if im.endswith((".resize.jpg", ".main.jpg", ".label.jpg")): continue if op.basename(im).startswith("calibrate"): continue images.append(im) fw = must_open(outfile, 'w') print(Seed.header(calibrate=jsonfile), file=fw) nseeds = 0 for im in images: imargs = [im, "--noheader", "--outdir={0}".format(outdir)] + xargs if jsonfile: imargs += ["--calibrate={0}".format(jsonfile)] objects = seeds(imargs) for o in objects: print(o, file=fw) nseeds += len(objects) fw.close() logging.debug("Processed {0} images.".format(len(images))) logging.debug("A total of {0} objects written to `{1}`.".\ format(nseeds, outfile)) pdfs = iglob(outdir, "*.pdf") outpdf = folder + "-output.pdf" cat(pdfs + ["--outfile={0}".format(outpdf)]) logging.debug("Debugging information written to `{0}`.".format(outpdf)) return outfile
0.001809
def do_copy(self, subcmd, opts, *args):
        """Duplicate something in working copy or repository, remembering history.

        usage:
            copy SRC DST

        SRC and DST can each be either a working copy (WC) path or URL:
            WC  -> WC:   copy and schedule for addition (with history)
            WC  -> URL:  immediately commit a copy of WC to URL
            URL -> WC:   check out URL into WC, schedule for addition
            URL -> URL:  complete server-side copy;  used to branch & tag

        ${cmd_option_list}
        """
        print("'svn %s' opts: %s" % (subcmd, opts))
        print("'svn %s' args: %s" % (subcmd, args))
0.006088
def music_info(songid): """ Get music info from baidu music api """ if isinstance(songid, list): songid = ','.join(songid) data = { "hq": 1, "songIds": songid } res = requests.post(MUSIC_INFO_URL, data=data) info = res.json() music_data = info["data"] songs = [] for song in music_data["songList"]: song_link, size = _song_link(song, music_data["xcode"]) songs.append({ "name": song["songName"], "singer": song["artistName"], "lrc_link": song["lrcLink"], "song_link": song_link, "size": size }) return songs
0.001506
def section_exists(self, section):
        """
        Checks if given section exists.

        Usage::

            >>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
            >>> sections_file_parser = SectionsFileParser()
            >>> sections_file_parser.content = content
            >>> sections_file_parser.parse()
            <foundations.parsers.SectionsFileParser object at 0x845683844>
            >>> sections_file_parser.section_exists("Section A")
            True
            >>> sections_file_parser.section_exists("Section C")
            False

        :param section: Section to check existence.
        :type section: unicode
        :return: Section existence.
        :rtype: bool
        """

        if section in self.__sections:
            LOGGER.debug("> '{0}' section exists in '{1}'.".format(section, self))
            return True
        else:
            LOGGER.debug("> '{0}' section doesn't exist in '{1}'.".format(section, self))
            return False
0.004583
def getServiceNamesToTraceIds(self, time_stamp, service_name, rpc_name):
    """
    Given a time stamp, server service name, and rpc name, fetch all of the
    client services calling in, each paired with the list of trace ids
    (list<i64>) from the server to that client.

    The three arguments specify epoch time in microseconds, server side
    service name and rpc name. The returned map has client_service_name as
    the key and list<trace_id> as the value.

    Parameters:
     - time_stamp
     - service_name
     - rpc_name
    """
    self.send_getServiceNamesToTraceIds(time_stamp, service_name, rpc_name)
    return self.recv_getServiceNamesToTraceIds()
0.004559
def reparentAll(self): ''' Fixes some of the parental relationships lost in parsing the Breathe graph. File relationships are recovered in :func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`. This method simply calls in this order: 1. :func:`~exhale.graph.ExhaleRoot.reparentUnions` 2. :func:`~exhale.graph.ExhaleRoot.reparentClassLike` 3. :func:`~exhale.graph.ExhaleRoot.reparentDirectories` 4. :func:`~exhale.graph.ExhaleRoot.renameToNamespaceScopes` 5. :func:`~exhale.graph.ExhaleRoot.reparentNamespaces` ''' self.reparentUnions() self.reparentClassLike() self.reparentDirectories() self.renameToNamespaceScopes() self.reparentNamespaces() # make sure all children lists are unique (no duplicate children) for node in self.all_nodes: node.children = list(set(node.children))
0.004283
def run_multiple_processes(args_list: List[List[str]],
                           die_on_failure: bool = True) -> None:
    """
    Fire up multiple processes, and wait for them to finish.

    Args:
        args_list: command arguments for each process
        die_on_failure: see :func:`wait_for_processes`
    """
    for procargs in args_list:
        start_process(procargs)
    # Wait for them all to finish
    wait_for_processes(die_on_failure=die_on_failure)
0.002141
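A small usage sketch; each inner list is one command line, all of them are started before any is waited on, and the log files are placeholders.

run_multiple_processes(
    [
        ['gzip', '-k', 'a.log'],  # placeholder commands and files
        ['gzip', '-k', 'b.log'],
    ],
    die_on_failure=True,
)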
def find_info(name=None): """Find the needed city server information.""" if not name: return list(servers.keys()) name = name.lower() if name in servers: info = servers[name] else: raise CityNotFound("Could not find the specified city: %s" % name) return info
0.006494
def ticket_audits_list(self, cursor=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/ticket_audits#list-all-ticket-audits" api_path = "/api/v2/ticket_audits.json" api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if cursor: api_query.update({ "cursor": cursor, }) return self.call(api_path, query=api_query, **kwargs)
0.006061
def convert_durations(metric): """ Convert session duration metrics from seconds to milliseconds. """ if metric[0] == 'avgSessionDuration' and metric[1]: new_metric = (metric[0], metric[1] * 1000) else: new_metric = metric return new_metric
0.003571
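A quick check of the conversion above.

print(convert_durations(('avgSessionDuration', 42.5)))  # ('avgSessionDuration', 42500.0)
print(convert_durations(('sessions', 10)))              # ('sessions', 10), left unchanged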
def calc_equivalent_modulus(self): """Calculates the equivalent laminate properties. The following attributes are calculated: e1, e2, g12, nu12, nu21 """ AI = np.matrix(self.ABD, dtype=np.float64).I a11, a12, a22, a33 = AI[0,0], AI[0,1], AI[1,1], AI[2,2] self.e1 = 1./(self.h*a11) self.e2 = 1./(self.h*a22) self.g12 = 1./(self.h*a33) self.nu12 = - a12 / a11 self.nu21 = - a12 / a22
0.012632
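A standalone sketch of the same relations, computing the in-plane equivalent constants directly from an ABD matrix and total thickness h; the numbers here are made up for illustration.

import numpy as np

h = 0.002                                        # laminate thickness
ABD = np.diag([1e8, 1e8, 5e7, 10.0, 10.0, 5.0])  # placeholder laminate stiffness
ABD[0, 1] = ABD[1, 0] = 3e7                      # some extension-extension coupling
a = np.linalg.inv(ABD)                           # laminate compliance

e1 = 1.0 / (h * a[0, 0])
e2 = 1.0 / (h * a[1, 1])
g12 = 1.0 / (h * a[2, 2])
nu12 = -a[0, 1] / a[0, 0]
nu21 = -a[0, 1] / a[1, 1]
print(e1, e2, g12, nu12, nu21)  # nu12 == nu21 == 0.3 for this symmetric example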
def cache_train(self): """ Loads the data for this classifier from a cache file :return: whether or not we were successful :rtype: bool """ filename = self.get_cache_location() if not os.path.exists(filename): return False categories = pickle.load(open(filename, 'rb')) assert isinstance(categories, BayesCategories), \ "Cache data is either corrupt or invalid" self.categories = categories # Updating our per-category overall probabilities self.calculate_category_probability() return True
0.0032