Dataset schema: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (string, 1 class: entailment).
def methods(self):
    """
    Run the typing methods
    """
    self.contamination_detection()
    ReportImage(self, 'confindr')
    self.run_genesippr()
    ReportImage(self, 'genesippr')
    self.run_sixteens()
    self.run_mash()
    self.run_gdcs()
    ReportImage(self, 'gdcs')
Run the typing methods
entailment
def contamination_detection(self):
    """
    Calculate the levels of contamination in the reads
    """
    self.qualityobject = quality.Quality(self)
    self.qualityobject.contamination_finder(input_path=self.sequencepath,
                                            report_path=self.reportpath)
Calculate the levels of contamination in the reads
entailment
def run_genesippr(self):
    """
    Run the genesippr analyses
    """
    GeneSippr(args=self,
              pipelinecommit=self.commit,
              startingtime=self.starttime,
              scriptpath=self.homepath,
              analysistype='genesippr',
              cutoff=0.95,
              pipeline=False,
              revbait=False)
Run the genesippr analyses
entailment
def run_sixteens(self):
    """
    Run the 16S analyses using the filtered database
    """
    SixteensFull(args=self,
                 pipelinecommit=self.commit,
                 startingtime=self.starttime,
                 scriptpath=self.homepath,
                 analysistype='sixteens_full',
                 cutoff=0.985)
Run the 16S analyses using the filtered database
entailment
def run_mash(self):
    """
    Run MASH to determine the closest refseq genomes
    """
    self.pipeline = True
    mash.Mash(inputobject=self,
              analysistype='mash')
Run MASH to determine the closest refseq genomes
entailment
def complete(self):
    """
    Determine if the analyses of the strains are complete e.g. there are no missing GDCS genes, and the
    sample.general.bestassemblyfile != 'NA'
    """
    # Boolean to store the completeness of the analyses
    allcomplete = True
    # Clear the list of samples that still require more sequence data
    self.incomplete = list()
    for sample in self.runmetadata.samples:
        if sample.general.bestassemblyfile != 'NA':
            try:
                # If the sample has been tagged as incomplete, only add it to the complete metadata list if the
                # pipeline is on its final iteration
                if sample.general.incomplete:
                    if self.final:
                        self.completemetadata.append(sample)
                    else:
                        sample.general.complete = False
                        allcomplete = False
                        self.incomplete.append(sample.name)
            except AttributeError:
                sample.general.complete = True
                self.completemetadata.append(sample)
        else:
            if self.final:
                self.completemetadata.append(sample)
            else:
                sample.general.complete = False
                allcomplete = False
                self.incomplete.append(sample.name)
    # If all the samples are complete, set the global variable for run completeness to True
    if allcomplete:
        self.analysescomplete = True
Determine if the analyses of the strains are complete e.g. there are no missing GDCS genes, and the sample.general.bestassemblyfile != 'NA'
entailment
def protected_operation(fn):
    """
    Use this decorator to prevent an operation from being executed
    when the related uri resource is still in use.

    The parent_object must contain:
      * a request
      * with a registry.queryUtility(IReferencer)

    :raises pyramid.httpexceptions.HTTPConflict: Signals that we don't want to delete a certain URI
        because it's still in use somewhere else.
    :raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were unable to check that
        the URI is no longer being used.
    """
    @functools.wraps(fn)
    def advice(parent_object, *args, **kw):
        response = _advice(parent_object.request)
        if response is not None:
            return response
        else:
            return fn(parent_object, *args, **kw)
    return advice
Use this decorator to prevent an operation from being executed when the related uri resource is still in use. The parent_object must contain: * a request * with a registry.queryUtility(IReferencer) :raises pyramid.httpexceptions.HTTPConflict: Signals that we don't want to delete a certain URI because it's still in use somewhere else. :raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were unable to check that the URI is no longer being used.
entailment
def protected_operation_with_request(fn):
    """
    Use this decorator to prevent an operation from being executed
    when the related uri resource is still in use.

    The request must contain a registry.queryUtility(IReferencer)

    :raises pyramid.httpexceptions.HTTPConflict: Signals that we don't want to delete a certain URI
        because it's still in use somewhere else.
    :raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were unable to check that
        the URI is no longer being used.
    """
    @functools.wraps(fn)
    def wrapped(request, *args, **kwargs):
        response = _advice(request)
        if response is not None:
            return response
        else:
            return fn(request, *args, **kwargs)
    return wrapped
Use this decorator to prevent an operation from being executed when the related uri resource is still in use. The request must contain a registry.queryUtility(IReferencer) :raises pyramid.httpexceptions.HTTPConflict: Signals that we don't want to delete a certain URI because it's still in use somewhere else. :raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were unable to check that the URI is no longer being used.
entailment
def protected_view(view, info):
    """allows adding `protected=True` to a `view_config`"""
    if info.options.get('protected'):
        def wrapper_view(context, request):
            response = _advice(request)
            if response is not None:
                return response
            else:
                return view(context, request)
        return wrapper_view
    return view
allows adding `protected=True` to a `view_config`
entailment
def syncdb(pool=None):
    """
    Create tables if they don't exist
    """
    from flask_philo_sqlalchemy.schema import Base  # noqa
    from flask_philo_sqlalchemy.orm import BaseModel  # noqa
    from flask_philo_sqlalchemy.connection import create_pool

    if pool is None:
        pool = create_pool()

    for conn_name, conn in pool.connections.items():
        Base.metadata.create_all(conn.engine)
Create tables if they don't exist
entailment
def checkInstalledPip(package, speak=True, speakSimilar=True):
    """checks if a given package is installed on pip"""
    packages = sorted([i.key for i in pip.get_installed_distributions()])
    installed = package in packages
    similar = None
    if not installed:
        similar = [pkg for pkg in packages if package in pkg]
    if speak:
        speakInstalledPackages(package, "pip", installed, similar, speakSimilar)
    return (installed, similar)
checks if a given package is installed on pip
entailment
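Note that pip.get_installed_distributions() was removed from pip's public API years ago. A minimal sketch of an equivalent check using only the standard library (importlib.metadata, Python 3.8+); "requests" is just an example package name:

from importlib.metadata import distributions

def check_installed(package):
    # Collect the names of all installed distributions, lowercased for comparison
    packages = sorted({d.metadata["Name"].lower() for d in distributions()})
    installed = package.lower() in packages
    similar = [pkg for pkg in packages if package.lower() in pkg] if not installed else None
    return installed, similar

print(check_installed("requests"))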
def checkInstalledBrew(package, similar=True, speak=True, speakSimilar=True):
    """checks if a given package is installed on homebrew"""
    packages = subprocess.check_output(['brew', 'list']).split()
    installed = package in packages
    similar = []
    if not installed:
        similar = [pkg for pkg in packages if package in pkg]
    if speak:
        speakInstalledPackages(package, "homebrew", installed, similar, speakSimilar)
    return (installed, similar)
checks if a given package is installed on homebrew
entailment
def init_module(remote_credences=None, local_path=None):
    """Connection information: remote_credences for remote access OR local_path for local access"""
    if remote_credences is not None:
        RemoteConnexion.HOST = remote_credences["DB"]["host"]
        RemoteConnexion.USER = remote_credences["DB"]["user"]
        RemoteConnexion.PASSWORD = remote_credences["DB"]["password"]
        RemoteConnexion.NAME = remote_credences["DB"]["name"]
        MonoExecutant.ConnectionClass = RemoteConnexion
        Executant.ConnectionClass = RemoteConnexion
        abstractRequetesSQL.setup_marks("psycopg2")
    elif local_path is not None:
        LocalConnexion.PATH = local_path
        MonoExecutant.ConnectionClass = LocalConnexion
        Executant.ConnectionClass = LocalConnexion
        abstractRequetesSQL.setup_marks("sqlite3")
    else:
        raise ValueError("Sql module should be init with one of remote or local mode !")
    logging.info(f"Sql module initialized with {MonoExecutant.ConnectionClass.__name__}")
Connection information: remote_credences for remote access OR local_path for local access
entailment
def cree_local_DB(scheme):
    """Create an empty DB according to the given scheme: dict { table: [ (column_name, column_type), ... ] }

    Useful at installation of the application (and for development).
    """
    conn = LocalConnexion()
    req = ""
    for table, fields in scheme.items():
        req += f"DROP TABLE IF EXISTS {table};"
        req_fields = ", ".join(f'{c_name} {c_type}' for c_name, c_type in fields)
        req += f"""CREATE TABLE {table} ( {req_fields} ) ;"""
    cur = conn.cursor()
    cur.executescript(req)
    conn.connexion.commit()
    conn.connexion.close()
    logging.info("Database created with success.")
Create an empty DB according to the given scheme: dict { table: [ (column_name, column_type), ... ] }. Useful at installation of the application (and for development).
entailment
def execute(self, requete_SQL):
    """Execute one or many requests.

    requete_SQL may be a tuple (requete, args) or a list of such tuples.
    Return the result or a list of results.
    """
    try:
        cursor = self.cursor()
        if isinstance(requete_SQL, tuple):
            res = self._execute_one(cursor, *requete_SQL)
        else:
            res = []
            for r in requete_SQL:
                if r:
                    res.append(self._execute_one(cursor, *r))
    except self.SQL.Error as e:
        raise StructureError(f"SQL error ! Details : \n {e}")
    else:
        self.connexion.commit()
    finally:
        self.connexion.close()
    return res
Execute one or many requests. requete_SQL may be a tuple (requete, args) or a list of such tuples. Return the result or a list of results.
entailment
def placeholders(cls, dic):
    """Placeholders for field names and value binds"""
    keys = [str(x) for x in dic]
    entete = ",".join(keys)
    placeholders = ",".join(cls.named_style.format(x) for x in keys)
    entete = f"({entete})"
    placeholders = f"({placeholders})"
    return entete, placeholders
Placeholders for field names and value binds
entailment
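For illustration, a standalone sketch of what placeholders() produces. The "%({})s" pattern is an assumption (psycopg2's named parameter style; the source sets named_style elsewhere via setup_marks):

named_style = "%({})s"  # assumed psycopg2 named style; configured via setup_marks in the source

def placeholders(dic):
    # Build "(col1,col2)" and the matching "(%(col1)s,%(col2)s)" bind string
    keys = [str(x) for x in dic]
    entete = "(" + ",".join(keys) + ")"
    binds = "(" + ",".join(named_style.format(x) for x in keys) + ")"
    return entete, binds

print(placeholders({"nom": "Ada", "age": 36}))
# -> ('(nom,age)', '(%(nom)s,%(age)s)')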
def jsonise(dic):
    """Return a dictionary whose fields are SQL-compatible. Uses JSON.

    Careful with None: it must stay None, not null.
    """
    d = {}
    for k, v in dic.items():
        if type(v) in abstractRequetesSQL.TYPES_PERMIS:
            d[k] = v
        else:
            try:
                d[k] = json.dumps(v, ensure_ascii=False, cls=formats.JsonEncoder)
            except ValueError as e:
                logging.exception("JSON encoding error!")
                raise e
    return d
Return a dictionary whose fields are SQL-compatible. Uses JSON. Careful with None: it must stay None, not null.
entailment
def insert(table, datas, avoid_conflict=False):
    """
    Insert rows from datas.

    :param table: Safe table name
    :param datas: List of dicts.
    :param avoid_conflict: Allows ignoring error if already exists (do nothing then)
    :return:
    """
    if avoid_conflict:
        debut = """INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} ON CONFLICT DO NOTHING"""
    else:
        debut = """INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} RETURNING *"""
    l = [abstractRequetesSQL.formate(debut, table=table, INSERT=d, args=d) for d in datas if d]
    return Executant(l)
Insert rows from datas :param table: Safe table name :param datas: List of dicts. :param avoid_conflict: Allows ignoring error if already exists (do nothing then) :return:
entailment
def update(cls, table, dic, Id):
    """ Update row with Id from table. Set fields given by dic."""
    if dic:
        req = "UPDATE {table} SET {SET} WHERE id = " + cls.named_style.format('__id') + " RETURNING * "
        r = abstractRequetesSQL.formate(req, SET=dic, table=table, args=dict(dic, __id=Id))
        return MonoExecutant(r)
    return MonoExecutant((f"SELECT * FROM {table} WHERE id = " + cls.named_style.format('__id'),
                          {"__id": Id}))
Update row with Id from table. Set fields given by dic.
entailment
def cree(table, dic, avoid_conflict=False):
    """ Create ONE row from dic and return the entry created """
    if avoid_conflict:
        req = """ INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} ON CONFLICT DO NOTHING RETURNING *"""
    else:
        req = """ INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} RETURNING *"""
    r = abstractRequetesSQL.formate(req, table=table, INSERT=dic, args=dic)
    return MonoExecutant(r)
Create ONE row from dic and return the entry created
entailment
def supprime(cls, table, **kwargs):
    """Remove entries matching the given condition.

    kwargs is a dict of column name: value, with length ONE.
    """
    assert len(kwargs) == 1
    field, value = kwargs.popitem()
    req = f"""DELETE FROM {table} WHERE {field} = """ + cls.mark_style
    args = (value,)
    return MonoExecutant((req, args))
Remove entries matching the given condition. kwargs is a dict of column name: value, with length ONE.
entailment
def trace(self, context, obj):
    """Enumerate the children of the given object, as would be visible and utilized by dispatch."""
    root = obj

    if isroutine(obj):
        yield Crumb(self, root, endpoint=True, handler=obj, options=opts(obj))
        return

    for name, attr in getmembers(obj if isclass(obj) else obj.__class__):
        if name == '__getattr__':
            sig = signature(attr)
            path = '{' + list(sig.parameters.keys())[1] + '}'
            reta = sig.return_annotation

            if reta is not sig.empty:
                if callable(reta) and not isclass(reta):
                    yield Crumb(self, root, path, endpoint=True, handler=reta, options=opts(reta))
                else:
                    yield Crumb(self, root, path, handler=reta)
            else:
                yield Crumb(self, root, path, handler=attr)

            del sig, path, reta
            continue

        elif name == '__call__':
            yield Crumb(self, root, None, endpoint=True, handler=obj)
            continue

        if self.protect and name[0] == '_':
            continue

        yield Crumb(self, root, name,
                    endpoint=callable(attr) and not isclass(attr),
                    handler=attr, options=opts(attr))
Enumerate the children of the given object, as would be visible and utilized by dispatch.
entailment
def main(argv=None):
    '''
    Main entry-point for calling layouts directly as a program.
    '''
    # Prep argparse
    ap = argparse.ArgumentParser(
        description='Basic query options for Python HID-IO Layouts repository',
    )
    ap.add_argument('--list', action='store_true', help='List available layout aliases.')
    ap.add_argument('--get', metavar='NAME', help='Retrieve the given layout, and return the JSON data')

    # Parse arguments
    args = ap.parse_args(argv)

    # Create layouts context manager
    mgr = Layouts()

    # Check if generating a list
    if args.list:
        for name in mgr.list_layouts():
            print(name)

    # Retrieve JSON layout
    if args.get is not None:
        layout = mgr.get_layout(args.get)
        print(json.dumps(layout.json()))
Main entry-point for calling layouts directly as a program.
entailment
def retrieve_github_cache(self, github_path, version, cache_dir, token):
    '''
    Retrieves a cache of the layouts git repo from GitHub

    @param github_path: Location of the git repo on GitHub (e.g. hid-io/layouts)
    @param version: git reference for the version to download (e.g. master)
    @param cache_dir: Directory to operate on external cache from
    @param token: GitHub access token
    '''
    # Check for environment variable Github token
    token = os.environ.get('GITHUB_APIKEY', None)

    # Retrieve repo information
    try:
        gh = Github(token)
        repo = gh.get_repo(github_path)
        commit = repo.get_commit(version)
        commits = repo.get_commits()
        total_commits = 0
        commit_number = 0
        for cmt in commits:
            if commit == cmt:
                commit_number = total_commits
            total_commits += 1
        commit_number = total_commits - commit_number
        tar_url = repo.get_archive_link('tarball', commit.sha)
    except GithubException.RateLimitExceededException:
        if token is None:
            log.warning("GITHUB_APIKEY is not set!")
        raise

    # GitHub only uses the first 7 characters of the sha in the download
    dirname_orig = "{}-{}".format(github_path.replace('/', '-'), commit.sha[:7])
    dirname_orig_path = os.path.join(cache_dir, dirname_orig)

    # Adding a commit number so it's clear which is the latest version without requiring git
    dirname = "{}-{}".format(commit_number, dirname_orig)
    dirname_path = os.path.join(cache_dir, dirname)

    # If directory doesn't exist, check if tarball does
    if not os.path.isdir(dirname_path):
        filename = "{}.tar.gz".format(dirname)
        filepath = os.path.join(cache_dir, filename)

        # If tarball doesn't exist, download it
        if not os.path.isfile(filepath):
            # Retrieve tar file
            chunk_size = 2000
            req = requests.get(tar_url, stream=True)
            with open(filepath, 'wb') as infile:
                for chunk in req.iter_content(chunk_size):
                    infile.write(chunk)

        # Extract tarfile
        tar = tarfile.open(filepath)
        tar.extractall(cache_dir)
        os.rename(dirname_orig_path, dirname_path)
        tar.close()

        # Remove tar.gz
        os.remove(filepath)
Retrieves a cache of the layouts git repo from GitHub @param github_path: Location of the git repo on GitHub (e.g. hid-io/layouts) @param version: git reference for the version to download (e.g. master) @param cache_dir: Directory to operate on external cache from @param token: GitHub access token
entailment
def get_layout(self, name):
    '''
    Returns the layout with the given name
    '''
    layout_chain = []

    # Retrieve initial layout file
    try:
        json_data = self.json_files[self.layout_names[name]]
    except KeyError:
        log.error('Could not find layout: %s', name)
        log.error('Layouts path: %s', self.layout_path)
        raise
    layout_chain.append(Layout(name, json_data))

    # Recursively locate parent layout files
    parent = layout_chain[-1].parent()
    while parent is not None:
        # Find the parent
        parent_path = None
        for path in self.json_file_paths:
            if os.path.normcase(os.path.normpath(parent)) in os.path.normcase(path):
                parent_path = path

        # Make sure a path was found (report the parent we searched for, not the None path)
        if parent_path is None:
            raise UnknownLayoutPathException('Could not find: {}'.format(parent))

        # Build layout for parent
        json_data = self.json_files[parent_path]
        layout_chain.append(Layout(parent_path, json_data))

        # Check parent of parent
        parent = layout_chain[-1].parent()

    # Squash layout files
    layout = self.squash_layouts(layout_chain)
    return layout
Returns the layout with the given name
entailment
def dict_merge(self, merge_to, merge_in):
    '''
    Recursively merges two dicts

    Overwrites any non-dictionary items
    merge_to <- merge_in
    Modifies merge_to dictionary

    @param merge_to: Base dictionary to merge into
    @param merge_in: Dictionary that may overwrite elements in merge_to
    '''
    for key, value in merge_in.items():
        # Just add, if the key doesn't exist yet
        # Or if set to None/Null
        if key not in merge_to.keys() or merge_to[key] is None:
            merge_to[key] = copy.copy(value)
            continue

        # Overwrite case, check for types
        # Make sure types are matching
        if not isinstance(value, type(merge_to[key])):
            raise MergeException('Types do not match! {}: {} != {}'.format(key, type(value), type(merge_to[key])))

        # Check if this is a dictionary item, in which case recursively merge
        if isinstance(value, dict):
            self.dict_merge(merge_to[key], value)
            continue

        # Otherwise just overwrite
        merge_to[key] = copy.copy(value)
Recursively merges two dicts Overwrites any non-dictionary items merge_to <- merge_in Modifies merge_to dictionary @param merge_to: Base dictionary to merge into @param merge_in: Dictionary that may overwrite elements in merge_to
entailment
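A minimal, self-contained replica of the merge behaviour described above (type checking omitted; the real method lives on a class and raises MergeException on type mismatches):

import copy

def dict_merge(merge_to, merge_in):
    # Recursive merge, merge_to <- merge_in; non-dict values are overwritten
    for key, value in merge_in.items():
        if key not in merge_to or merge_to[key] is None:
            merge_to[key] = copy.copy(value)
        elif isinstance(value, dict) and isinstance(merge_to[key], dict):
            dict_merge(merge_to[key], value)
        else:
            merge_to[key] = copy.copy(value)

base = {"a": 1, "nested": {"x": 1, "y": 2}}
dict_merge(base, {"b": 5, "nested": {"y": 3, "z": 4}})
print(base)  # {'a': 1, 'nested': {'x': 1, 'y': 3, 'z': 4}, 'b': 5}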
def squash_layouts(self, layouts):
    '''
    Returns a squashed layout

    The first element takes precedence (i.e. left to right).
    Dictionaries are recursively merged, overwrites only occur on non-dictionary entries.

    [0, 1]
    0:
      test: 'my data'
    1:
      test: 'stuff'
    Result:
      test: 'my data'

    @param layouts: List of layouts to merge together
    @return: New layout with list of layouts squash merged
    '''
    top_layout = layouts[0]
    json_data = {}

    # Generate a new container Layout
    layout = Layout(top_layout.name(), json_data, layouts)

    # Merge in each of the layouts
    for mlayout in reversed(layouts):
        # Overwrite all fields, *except* dictionaries
        # For dictionaries, keep recursing until non-dictionaries are found
        self.dict_merge(layout.json(), mlayout.json())

    return layout
Returns a squashed layout The first element takes precedence (i.e. left to right). Dictionaries are recursively merged, overwrites only occur on non-dictionary entries. [0,1] 0: test: 'my data' 1: test: 'stuff' Result: test: 'my data' @param layouts: List of layouts to merge together @return: New layout with list of layouts squash merged
entailment
def dict(self, name, key_caps=False, value_caps=False):
    '''
    Returns a JSON dict

    @key_caps: Converts all dictionary keys to uppercase
    @value_caps: Converts all dictionary values to uppercase

    @return: JSON item (may be a variable, list or dictionary)
    '''
    # Invalid Dictionary
    if not isinstance(self.json_data[name], dict):
        raise InvalidDictionaryException

    # Convert key and/or values of dictionary to uppercase
    output = {}
    for key, value in self.json_data[name].items():
        output[key.upper() if key_caps else key] = value.upper() if value_caps else value

    return output
Returns a JSON dict @key_caps: Converts all dictionary keys to uppercase @value_caps: Converts all dictionary values to uppercase @return: JSON item (may be a variable, list or dictionary)
entailment
def locale(self):
    '''
    Do a lookup for the locale code that is set for this layout.

    NOTE: USB HID specifies only 35 different locales. If your layout does not fit, it should be set to Undefined/0

    @return: Tuple (<USB HID locale code>, <name>)
    '''
    name = self.json_data['hid_locale']

    # Set to Undefined/0 if not set
    if name is None:
        name = "Undefined"

    return (int(self.json_data['from_hid_locale'][name]), name)
Do a lookup for the locale code that is set for this layout. NOTE: USB HID specifies only 35 different locales. If your layout does not fit, it should be set to Undefined/0 @return: Tuple (<USB HID locale code>, <name>)
entailment
def compose(self, text, minimal_clears=False, no_clears=False):
    '''
    Returns the sequence of combinations necessary to compose given text.

    If the text expression is not possible with the given layout a ComposeException is thrown.

    Iterate over the string, converting each character into a key sequence.
    Between each character, an empty combo is inserted to handle duplicate strings (and USB HID codes between
    characters)

    @param text: Input UTF-8 string
    @param minimal_clears: Set to True to minimize the number of code clears. False (default) includes a clear
        after every character.
    @param no_clears: Set to True to not add any code clears (useful for input sequences). False (default) to
        include code clears.

    @returns: Sequence of combinations needed to generate the given text string
    '''
    sequence = []

    clear = self.json_data['to_hid_keyboard']['0x00']  # No Event

    for char in text:
        # Make sure the composition element is available
        if char not in self.json_data['composition']:
            raise ComposeException("'{}' is not defined as a composition in the layout '{}'".format(char, self.name))

        # Lookup the sequence to handle this character
        lookup = self.json_data['composition'][char]

        # If using minimal clears, check to see if we need to re-use any codes
        # Only need to check the most recent addition with the first combo
        if sequence and set(tuple(lookup[0])) & set(tuple(sequence[-1])) and not no_clears:
            sequence.extend([[clear]])

        # Add to overall sequence
        sequence.extend(lookup)

        # Add empty combo for sequence splitting
        if not minimal_clears and not no_clears:
            # Blindly add a clear combo between characters
            sequence.extend([[clear]])

    # When using minimal clears, we still need to add a final clear
    if minimal_clears and not no_clears:
        sequence.extend([[clear]])

    return sequence
Returns the sequence of combinations necessary to compose given text. If the text expression is not possible with the given layout a ComposeException is thrown. Iterate over the string, converting each character into a key sequence. Between each character, an empty combo is inserted to handle duplicate strings (and USB HID codes between characters) @param text: Input UTF-8 string @param minimal_clears: Set to True to minimize the number of code clears. False (default) includes a clear after every character. @param no_clears: Set to True to not add any code clears (useful for input sequences). False (default) to include code clears. @returns: Sequence of combinations needed to generate the given text string
entailment
def main(self):
    """
    Run the necessary methods in the correct order
    """
    logging.info('Starting {at} analysis pipeline'.format(at=self.analysistype))
    # Create the objects to be used in the analyses
    objects = Objectprep(self)
    objects.objectprep()
    self.runmetadata = objects.samples
    self.threads = int(self.cpus / len(self.runmetadata.samples)) \
        if self.cpus / len(self.runmetadata.samples) > 1 else 1
    if self.genesippr:
        # Run the genesippr analyses
        self.analysistype = 'genesippr'
        self.targetpath = os.path.join(self.reffilepath, self.analysistype)
        Sippr(inputobject=self, cutoff=0.90, averagedepth=5)
        # Create the reports
        self.reports = Reports(self)
        Reports.reporter(self.reports)
    if self.sixteens:
        # Run the 16S analyses
        SixteensFull(args=self, pipelinecommit=self.commit, startingtime=self.starttime,
                     scriptpath=self.homepath, analysistype='sixteens_full', cutoff=0.985)
    if self.closestreference:
        self.pipeline = True
        mash.Mash(inputobject=self, analysistype='mash')
    if self.rmlst:
        rmlst = MLSTSippr(args=self, pipelinecommit=self.commit, startingtime=self.starttime,
                          scriptpath=self.homepath, analysistype='rMLST', cutoff=1.0, pipeline=True)
        rmlst.runner()
    if self.resistance:
        # ResFinding
        res = Resistance(args=self, pipelinecommit=self.commit, startingtime=self.starttime,
                         scriptpath=self.homepath, analysistype='resfinder', cutoff=0.7,
                         pipeline=False, revbait=True)
        res.main()
    if self.virulence:
        self.genus_specific()
        Virulence(args=self, pipelinecommit=self.commit, startingtime=self.starttime,
                  scriptpath=self.homepath, analysistype='virulence', cutoff=0.95,
                  pipeline=False, revbait=True)
    if self.gdcs:
        self.genus_specific()
        # Run the GDCS analysis
        self.analysistype = 'GDCS'
        self.targetpath = os.path.join(self.reffilepath, self.analysistype)
        Sippr(inputobject=self, cutoff=0.95, k=self.gdcs_kmer_size)
        # Create the reports
        self.reports = Reports(self)
        Reports.gdcsreporter(self.reports)
    if self.mlst:
        self.genus_specific()
        mlst = MLSTSippr(args=self, pipelinecommit=self.commit, startingtime=self.starttime,
                         scriptpath=self.homepath, analysistype='MLST', cutoff=1.0, pipeline=True)
        mlst.runner()
    # Serotyping
    if self.serotype:
        self.genus_specific()
        SeroSippr(args=self, pipelinecommit=self.commit, startingtime=self.starttime,
                  scriptpath=self.homepath, analysistype='serosippr', cutoff=0.90, pipeline=True)
    # Point mutation detection
    if self.pointfinder:
        self.genus_specific()
        PointSippr(args=self, pipelinecommit=self.commit, startingtime=self.starttime,
                   scriptpath=self.homepath, analysistype='pointfinder', cutoff=0.85,
                   pipeline=True, revbait=True)
    if self.user_genes:
        custom = CustomGenes(args=self, kmer_size=self.kmer_size,
                             allow_soft_clips=self.allow_soft_clips)
        custom.main()
    # Print the metadata
    MetadataPrinter(self)
Run the necessary methods in the correct order
entailment
def genus_specific(self):
    """
    For genus-specific targets, MLST and serotyping, determine if the closest refseq genus is known - i.e. if
    16S analyses have been performed. Perform the analyses if required
    """
    # Initialise a variable to store whether the necessary analyses have already been performed
    closestrefseqgenus = False
    for sample in self.runmetadata.samples:
        if sample.general.bestassemblyfile != 'NA':
            try:
                closestrefseqgenus = sample.general.closestrefseqgenus
            except AttributeError:
                pass
    # Perform the 16S analyses as required
    if not closestrefseqgenus:
        logging.info('Must perform MASH analyses to determine genera of samples')
        self.pipeline = True
        # Run the analyses
        mash.Mash(self, 'mash')
For genus-specific targets, MLST and serotyping, determine if the closest refseq genus is known - i.e. if 16S analyses have been performed. Perform the analyses if required
entailment
def pause():
    """Tell iTunes to pause"""
    if not settings.platformCompatible():
        return False
    (output, error) = subprocess.Popen(["osascript", "-e", PAUSE], stdout=subprocess.PIPE).communicate()
Tell iTunes to pause
entailment
def resume():
    """Tell iTunes to resume"""
    if not settings.platformCompatible():
        return False
    (output, error) = subprocess.Popen(["osascript", "-e", RESUME], stdout=subprocess.PIPE).communicate()
Tell iTunes to resume
entailment
def skip():
    """Tell iTunes to skip a song"""
    if not settings.platformCompatible():
        return False
    (output, error) = subprocess.Popen(["osascript", "-e", SKIP], stdout=subprocess.PIPE).communicate()
Tell iTunes to skip a song
entailment
def play(song, artist=None, album=None):
    """Tells iTunes to play a given song/artist/album - MACOSX ONLY"""
    if not settings.platformCompatible():
        return False

    if song and not artist and not album:
        (output, error) = subprocess.Popen(["osascript", "-e", DEFAULT_ITUNES_PLAY % (song, song, song)],
                                           stdout=subprocess.PIPE).communicate()
        if output:
            speech.speak("Playing " + output)
        else:
            speech.speak("Unable to find " + song + " in your library.")
    elif song and artist and not album:
        (output, error) = subprocess.Popen(["osascript", "-e", ITUNES_SONG_AND_ARTIST % (song, artist, song, artist)],
                                           stdout=subprocess.PIPE).communicate()
        if output:
            speech.speak("Playing " + output)
        else:
            speech.speak("Unable to find " + song + " in your library.")
    elif album and artist and not song:
        (output, error) = subprocess.Popen(["osascript", "-e", ITUNES_ALBUM_AND_ARTIST % (artist, album)],
                                           stdout=subprocess.PIPE).communicate()
        if output:
            speech.speak("Playing " + output)
        else:
            # Report the album here; song is None in this branch
            speech.speak("Unable to find " + album + " in your library.")
    elif album and not artist and not song:
        (output, error) = subprocess.Popen(["osascript", "-e", ITUNES_ALBUM % (album)],
                                           stdout=subprocess.PIPE).communicate()
        if output:
            speech.speak("Playing " + output)
        else:
            speech.speak("Unable to find " + album + " in your library.")
    elif artist and not album and not song:
        (output, error) = subprocess.Popen(["osascript", "-e", ITUNES_ARTIST % (artist)],
                                           stdout=subprocess.PIPE).communicate()
        if output:
            speech.speak("Playing " + output)
        else:
            speech.speak("Unable to find " + artist + " in your library.")
Tells iTunes to play a given song/artist/album - MACOSX ONLY
entailment
def service_provider(*services):
    """
    This is a class decorator that declares a class to provide a set of services.
    It is expected that the class has a no-arg constructor and will be instantiated as a singleton.
    """
    def real_decorator(clazz):
        instance = clazz()
        for service in services:
            global_lookup.add(service, instance)
        return clazz
    return real_decorator
This is a class decorator that declares a class to provide a set of services. It is expected that the class has a no-arg constructor and will be instantiated as a singleton.
entailment
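A minimal, self-contained sketch of the registry pattern this decorator implements; Lookup here is a stand-in for the library's global_lookup, which the source does not show:

class Lookup:
    # Stand-in registry; the real global_lookup is defined elsewhere in the library.
    def __init__(self):
        self._providers = {}
    def add(self, service, instance):
        self._providers[service] = instance
    def lookup(self, service):
        return self._providers[service]

global_lookup = Lookup()

def service_provider(*services):
    def real_decorator(clazz):
        instance = clazz()  # instantiated once, as a singleton
        for service in services:
            global_lookup.add(service, instance)
        return clazz
    return real_decorator

class EmailService:
    pass

@service_provider(EmailService)
class SmtpMailer:
    def send(self, to):
        print("sending to", to)

global_lookup.lookup(EmailService).send("a@example.com")  # -> sending to a@example.com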
def syllabify(word):
    '''Syllabify the given word, whether simplex or complex.'''
    compound = bool(re.search(r'(-| |=)', word))
    syllabify = _syllabify_compound if compound else _syllabify_simplex
    syllabifications = list(syllabify(word))

    for word, rules in rank(syllabifications):
        # post-process
        word = str(replace_umlauts(word, put_back=True))
        rules = rules[1:]
        yield word, rules
Syllabify the given word, whether simplex or complex.
entailment
def apply_T2(word):
    '''There is a syllable boundary within a VV sequence of two nonidentical
    vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].'''
    WORD = word
    offset = 0

    for vv in vv_sequences(WORD):
        seq = vv.group(2)

        if not is_diphthong(seq) and not is_long(seq):
            i = vv.start(2) + 1 + offset
            WORD = WORD[:i] + '.' + WORD[i:]
            offset += 1

    RULE = ' T2' if word != WORD else ''

    return WORD, RULE
There is a syllable boundary within a VV sequence of two nonidentical vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].
entailment
def apply_T5(word):
    '''If a (V)VVV sequence contains a VV sequence that could be an /i/-final
    diphthong, there is a syllable boundary between it and the third vowel,
    e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an],
    [säi.e], [oi.om.me].'''
    WORD = word
    offset = 0

    for vi in i_final_diphthong_vvv_sequences(WORD):
        s = max(vi.start(1), vi.start(2))
        i = 2 if s + 2 < len(word) and is_vowel(word[s + 2]) else 0

        # if '.' not in word[:s]:
        #     continue

        if not (s == i == 0):
            i += s + offset
            WORD = WORD[:i] + '.' + WORD[i:]
            offset += 1

    RULE = ' T5' if word != WORD else ''

    return WORD, RULE
If a (V)VVV sequence contains a VV sequence that could be an /i/-final diphthong, there is a syllable boundary between it and the third vowel, e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an], [säi.e], [oi.om.me].
entailment
def apply_T6(word):
    '''If a VVV-sequence contains a long vowel, there is a syllable boundary
    between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
    [mää.yt.te].'''
    WORD = word
    offset = 0

    for vvv in vvv_sequences(WORD):
        seq = vvv.group(2)
        j = 2 if is_long(seq[:2]) else 1 if is_long(seq[1:]) else 0

        if j:
            i = vvv.start(2) + j + offset
            WORD = WORD[:i] + '.' + WORD[i:]
            offset += 1

    RULE = ' T6' if word != WORD else ''

    return WORD, RULE
If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].
entailment
def apply_T7(word):
    '''If a VVV-sequence does not contain a potential /i/-final diphthong,
    there is a syllable boundary between the second and third vowels,
    e.g. [kau.an], [leu.an], [kiu.as].'''
    WORD = word
    offset = 0

    for vvv in vvv_sequences(WORD):
        i = vvv.start(2) + 2 + offset
        WORD = WORD[:i] + '.' + WORD[i:]
        offset += 1

    RULE = ' T7' if word != WORD else ''

    return WORD, RULE
If a VVV-sequence does not contain a potential /i/-final diphthong, there is a syllable boundary between the second and third vowels, e.g. [kau.an], [leu.an], [kiu.as].
entailment
def apply_T8(word):
    '''Split /ie/, /uo/, or /yö/ sequences in syllables that do not take
    primary stress.'''
    WORD = word
    offset = 0

    for vv in tail_diphthongs(WORD):
        i = vv.start(1) + 1 + offset
        WORD = WORD[:i] + '.' + WORD[i:]
        offset += 1

    RULE = ' T8' if word != WORD else ''

    return WORD, RULE
Split /ie/, /uo/, or /yö/ sequences in syllables that do not take primary stress.
entailment
def wsp(word):
    '''Return the number of unstressed heavy syllables.'''
    HEAVY = r'[ieaAoO]{1}[\.]*(u|y)[^ieaAoO]+(\.|$)'

    # # if the word is not monosyllabic, lop off the final syllable, which is
    # # extrametrical
    # if '.' in word:
    #     word = word[:word.rindex('.')]

    # gather the indices of syllable boundaries
    delimiters = [i for i, char in enumerate(word) if char == '.']

    if len(delimiters) % 2 != 0:
        delimiters.append(len(word))

    unstressed = []

    # gather the indices of unstressed positions
    for i, d in enumerate(delimiters):
        if i % 2 == 0:
            unstressed.extend(range(d + 1, delimiters[i + 1]))

    # find the number of unstressed heavy syllables
    heavies = re.finditer(HEAVY, word)
    violations = sum(1 for m in heavies if m.start(0) in unstressed)

    return violations
Return the number of unstressed heavy syllables.
entailment
def pk_prom(word):
    '''Return the number of stressed light syllables.'''
    LIGHT = r'[ieaAoO]{1}[\.]*(u|y)(\.|$)'

    # # if the word is not monosyllabic, lop off the final syllable, which is
    # # extrametrical
    # if '.' in word:
    #     word = word[:word.rindex('.')]

    # gather the indices of syllable boundaries
    delimiters = [0, ] + [i for i, char in enumerate(word) if char == '.']

    if len(delimiters) % 2 != 0:
        delimiters.append(len(word))

    stressed = []

    # gather the indices of stressed positions
    for i, d in enumerate(delimiters):
        if i % 2 == 0:
            stressed.extend(range(d + 1, delimiters[i + 1]))

    # find the number of stressed light syllables
    heavies = re.finditer(LIGHT, word)
    violations = sum(1 for m in heavies if m.start(1) in stressed)

    return violations
Return the number of stressed light syllables.
entailment
def ext(self, extension):
    """
    Match files with an extension - e.g. 'js', 'txt'
    """
    new_pathq = copy(self)
    new_pathq._pattern.ext = extension
    return new_pathq
Match files with an extension - e.g. 'js', 'txt'
entailment
def apply_T8(word):
    '''Split /ie/ sequences in syllables that do not take primary stress.'''
    WORD = word
    offset = 0

    for ie in ie_sequences(WORD):
        i = ie.start(1) + 1 + offset
        WORD = WORD[:i] + '.' + WORD[i:]
        offset += 1

    RULE = ' T8' if word != WORD else ''

    return WORD, RULE
Split /ie/ sequences in syllables that do not take primary stress.
entailment
def apply_T9(word):
    '''Split /iu/ sequences that do not appear in the first, second, or final
    syllables.'''
    WORD = word
    index = 0
    offset = 0

    for iu in iu_sequences(WORD):
        if iu.start(1) != index:
            i = iu.start(1) + 1 + offset
            WORD = WORD[:i] + '.' + WORD[i:]
            index = iu.start(1)
            offset += 1

    RULE = ' T9' if word != WORD else ''

    return WORD, RULE
Split /iu/ sequences that do not appear in the first, second, or final syllables.
entailment
def import_class(import_str):
    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str, traceback.format_exception(*sys.exc_info())))
Returns a class from a string including module and class.
entailment
def import_object_ns(name_space, import_str, *args, **kwargs):
    """Tries to import object from default namespace.

    Imports a class and return an instance of it, first by trying
    to find the class in a default namespace, then failing back to
    a full path if not found in the default namespace.
    """
    import_value = "%s.%s" % (name_space, import_str)
    try:
        return import_class(import_value)(*args, **kwargs)
    except ImportError:
        return import_class(import_str)(*args, **kwargs)
Tries to import object from default namespace. Imports a class and return an instance of it, first by trying to find the class in a default namespace, then failing back to a full path if not found in the default namespace.
entailment
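A runnable usage sketch against the standard library; import_class is repeated (slightly simplified) so the example is self-contained, and 'myapp.types' is a made-up namespace to force the fallback path:

import sys

def import_class(import_str):
    # Same helper as in the previous entry, with a shortened error message.
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found' % class_str)

def import_object_ns(name_space, import_str, *args, **kwargs):
    try:
        return import_class("%s.%s" % (name_space, import_str))(*args, **kwargs)
    except ImportError:
        return import_class(import_str)(*args, **kwargs)

# 'myapp.types.collections.OrderedDict' fails to import, so the fallback
# resolves the full stdlib path instead.
od = import_object_ns('myapp.types', 'collections.OrderedDict', [('a', 1)])
print(od)  # OrderedDict([('a', 1)])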
def install_virtualbox(distribution, force_setup=False):
    """ install virtualbox """
    if 'ubuntu' in distribution:
        with hide('running', 'stdout'):
            sudo('DEBIAN_FRONTEND=noninteractive apt-get update')
            sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o "
                 "Dpkg::Options::='--force-confdef' "
                 "-o Dpkg::Options::='--force-confold' upgrade --force-yes")
            install_ubuntu_development_tools()
            apt_install(packages=['dkms', 'linux-headers-generic', 'build-essential'])
            sudo('wget -q '
                 'https://www.virtualbox.org/download/oracle_vbox.asc -O- |'
                 'sudo apt-key add -')
            os = lsb_release()
            apt_string = ' '.join(
                ['deb', 'http://download.virtualbox.org/virtualbox/debian',
                 '%s contrib' % os['DISTRIB_CODENAME']])
            apt_add_repository_from_apt_string(apt_string, 'vbox.list')
            apt_install(packages=['virtualbox-5.0'])

            loaded_modules = sudo('lsmod')
            if 'vboxdrv' not in loaded_modules or force_setup:
                if 'Vivid Vervet' in run('cat /etc/os-release'):
                    sudo('systemctl start vboxdrv')
                else:
                    sudo('/etc/init.d/vboxdrv start')

            sudo('wget -c '
                 'http://download.virtualbox.org/virtualbox/5.0.4/'
                 'Oracle_VM_VirtualBox_Extension_Pack-5.0.4-102546.vbox-extpack')  # noqa
            sudo('VBoxManage extpack install --replace '
                 'Oracle_VM_VirtualBox_Extension_Pack-5.0.4-102546.vbox-extpack')
install virtualbox
entailment
def install_vagrant_plugin(plugin, use_sudo=False):
    """ install vagrant plugin """
    cmd = 'vagrant plugin install %s' % plugin

    with settings(hide('running', 'stdout')):
        if use_sudo:
            if plugin not in sudo('vagrant plugin list'):
                sudo(cmd)
        else:
            if plugin not in run('vagrant plugin list'):
                run(cmd)
install vagrant plugin
entailment
def runner(self):
    """
    Run the necessary methods in the correct order
    """
    printtime('Starting mashsippr analysis pipeline', self.starttime)
    if not self.pipeline:
        # Create the objects to be used in the analyses
        objects = Objectprep(self)
        objects.objectprep()
        self.runmetadata = objects.samples
    # Run the analyses
    Mash(self, self.analysistype)
Run the necessary methods in the correct order
entailment
def cree_widgets(self):
    """Create widgets and store them in self.widgets"""
    for t in self.FIELDS:
        if type(t) is str:
            attr, kwargs = t, {}
        else:
            attr, kwargs = t[0], t[1].copy()
        self.champs.append(attr)

        is_editable = kwargs.pop("is_editable", self.is_editable)
        args = [self.acces[attr], is_editable]

        with_base = kwargs.pop("with_base", False)
        if with_base:
            args.append(self.acces.base)

        if 'with_label' in kwargs:
            label = kwargs.pop('with_label')
        else:
            label = ASSOCIATION[attr][0]

        if kwargs:
            w = ASSOCIATION[attr][3](*args, **kwargs)
        else:
            w = ASSOCIATION[attr][3](*args)
        self.widgets[attr] = (w, label)
Create widgets and store them in self.widgets
entailment
def cree_ws_lecture(self, champs_ligne):
    """Alternative to create read-only widgets. They should be set afterwards."""
    for c in champs_ligne:
        label = ASSOCIATION[c][0]
        w = ASSOCIATION[c][3](self.acces[c], False)
        w.setObjectName("champ-lecture-seule-details")
        self.widgets[c] = (w, label)
Alternative to create read-only widgets. They should be set afterwards.
entailment
def preservesurrogates(s):
    """
    Function for splitting a string into a list of characters, preserving surrogate pairs.

    In python 2, unicode characters above 0x10000 are stored as surrogate pairs. For example, the Unicode
    character u"\U0001e900" is stored as the surrogate pair u"\ud83a\udd00":

        s = u"AB\U0001e900CD"
        len(s) -> 6
        list(s) -> [u'A', u'B', u'\ud83a', u'\udd00', u'C', 'D']
        len(preservesurrogates(s)) -> 5
        list(preservesurrogates(s)) -> [u'A', u'B', u'\U0001e900', u'C', u'D']

    :param s: String to split
    :return: List of characters
    """
    if not isinstance(s, six.text_type):
        raise TypeError(u"String to split must be of type 'unicode'!")
    surrogates_regex_str = u"[{0}-{1}][{2}-{3}]".format(HIGH_SURROGATE_START,
                                                        HIGH_SURROGATE_END,
                                                        LOW_SURROGATE_START,
                                                        LOW_SURROGATE_END)
    surrogates_regex = re.compile(u"(?:{0})|.".format(surrogates_regex_str))
    return surrogates_regex.findall(s)
Function for splitting a string into a list of characters, preserving surrogate pairs. In python 2, unicode characters above 0x10000 are stored as surrogate pairs. For example, the Unicode character u"\U0001e900" is stored as the surrogate pair u"\ud83a\udd00": s = u"AB\U0001e900CD" len(s) -> 6 list(s) -> [u'A', u'B', u'\ud83a', u'\udd00', u'C', 'D'] len(preservesurrogates(s)) -> 5 list(preservesurrogates(s)) -> [u'A', u'B', u'\U0001e900', u'C', u'D'] :param s: String to split :return: List of characters
entailment
def _unichr(i):
    """
    Helper function for taking a Unicode scalar value and returning a Unicode character.

    :param i: Unicode scalar value to convert.
    :return: Unicode character
    """
    if not isinstance(i, int):
        raise TypeError
    try:
        return six.unichr(i)
    except ValueError:
        # Workaround the error "ValueError: unichr() arg not in range(0x10000) (narrow Python build)"
        return struct.pack("i", i).decode("utf-32")
Helper function for taking a Unicode scalar value and returning a Unicode character. :param i: Unicode scalar value to convert. :return: Unicode character
entailment
def _padded_hex(i, pad_width=4, uppercase=True):
    """
    Helper function for taking an integer and returning a hex string. The string will be padded on the left
    with zeroes until the string is of the specified width. For example:

        _padded_hex(31, pad_width=4, uppercase=True) -> "001F"

    :param i: integer to convert to a hex string
    :param pad_width: int specifying the minimum width of the output string. String will be padded on the
        left with '0' as needed.
    :param uppercase: Boolean indicating if we should use uppercase characters in the output string
        (default=True).
    :return: Hex string representation of the input integer.
    """
    result = hex(i)[2:]  # Remove the leading "0x"
    if uppercase:
        result = result.upper()
    return result.zfill(pad_width)
Helper function for taking an integer and returning a hex string. The string will be padded on the left with zeroes until the string is of the specified width. For example: _padded_hex(31, pad_width=4, uppercase=True) -> "001F" :param i: integer to convert to a hex string :param pad_width: int specifying the minimum width of the output string. String will be padded on the left with '0' as needed. :param uppercase: Boolean indicating if we should use uppercase characters in the output string (default=True). :return: Hex string representation of the input integer.
entailment
def _uax44lm2transform(s):
    """
    Helper function for taking a string (i.e. a Unicode character name) and transforming it via UAX44-LM2
    loose matching rule. For more information, see <https://www.unicode.org/reports/tr44/#UAX44-LM2>.

    The rule is defined as follows:

        "UAX44-LM2. Ignore case, whitespace, underscore ('_'), and all medial hyphens except the hyphen in
        U+1180 HANGUL JUNGSEONG O-E."

    Therefore, correctly implementing the rule involves performing the following three operations, in order:

        1. remove all medial hyphens (except the medial hyphen in the name for U+1180)
        2. remove all whitespace and underscore characters
        3. apply toLowercase() to both strings

    A "medial hyphen" is defined as follows (quoted from the above referenced web page):

        "In this rule 'medial hyphen' is to be construed as a hyphen occurring immediately between two letters
        in the normative Unicode character name, as published in the Unicode names list, and not to any hyphen
        that may transiently occur medially as a result of removing whitespace before removing hyphens in a
        particular implementation of matching. Thus the hyphen in the name U+10089 LINEAR B IDEOGRAM B107M
        HE-GOAT is medial, and should be ignored in loose matching, but the hyphen in the name U+0F39 TIBETAN
        MARK TSA -PHRU is not medial, and should not be ignored in loose matching."

    :param s: String to transform
    :return: String transformed per UAX44-LM2 loose matching rule.
    """
    result = s
    # For the regex, we are using lookaround assertions to verify that there is a word character immediately
    # before (the lookbehind assertion (?<=\w)) and immediately after (the lookahead assertion (?=\w)) the
    # hyphen, per the "medial hyphen" definition that it is a hyphen occurring immediately between two letters.
    medialhyphen = re.compile(r"(?<=\w)-(?=\w)")
    whitespaceunderscore = re.compile(r"[\s_]", re.UNICODE)
    # Ok to hard code, this name should never change: https://www.unicode.org/policies/stability_policy.html#Name
    if result != "HANGUL JUNGSEONG O-E":
        result = medialhyphen.sub("", result)
    result = whitespaceunderscore.sub("", result)
    return result.lower()
Helper function for taking a string (i.e. a Unicode character name) and transforming it via UAX44-LM2 loose matching rule. For more information, see <https://www.unicode.org/reports/tr44/#UAX44-LM2>. The rule is defined as follows: "UAX44-LM2. Ignore case, whitespace, underscore ('_'), and all medial hyphens except the hyphen in U+1180 HANGUL JUNGSEONG O-E." Therefore, correctly implementing the rule involves performing the following three operations, in order: 1. remove all medial hyphens (except the medial hyphen in the name for U+1180) 2. remove all whitespace and underscore characters 3. apply toLowercase() to both strings A "medial hyphen" is defined as follows (quoted from the above referenced web page): "In this rule 'medial hyphen' is to be construed as a hyphen occurring immediately between two letters in the normative Unicode character name, as published in the Unicode names list, and not to any hyphen that may transiently occur medially as a result of removing whitespace before removing hyphens in a particular implementation of matching. Thus the hyphen in the name U+10089 LINEAR B IDEOGRAM B107M HE-GOAT is medial, and should be ignored in loose matching, but the hyphen in the name U+0F39 TIBETAN MARK TSA -PHRU is not medial, and should not be ignored in loose matching." :param s: String to transform :return: String transformed per UAX44-LM2 loose matching rule.
entailment
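A standalone replica of the transform, showing the medial vs non-medial hyphen distinction from the docstring (for illustration only; the real helper is the one above):

import re

def uax44lm2(s):
    if s != "HANGUL JUNGSEONG O-E":
        s = re.sub(r"(?<=\w)-(?=\w)", "", s)   # drop medial hyphens
    s = re.sub(r"[\s_]", "", s)                # drop whitespace and underscores
    return s.lower()

print(uax44lm2("LINEAR B IDEOGRAM B107M HE-GOAT"))  # linearbideogramb107mhegoat
print(uax44lm2("TIBETAN MARK TSA -PHRU"))           # tibetanmarktsa-phru
print(uax44lm2("HANGUL JUNGSEONG O-E"))             # hanguljungseongo-e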
def _to_unicode_scalar_value(s):
    """
    Helper function for converting a character or surrogate pair into a Unicode scalar value
    e.g. "\ud800\udc00" -> 0x10000

    The algorithm can be found in older versions of the Unicode Standard.
    https://unicode.org/versions/Unicode3.0.0/ch03.pdf, Section 3.7, D28

        Unicode scalar value: a number N from 0 to 0x10FFFF is defined by applying the following algorithm
        to a character sequence S:

            If S is a single, non-surrogate value U:
                N = U
            If S is a surrogate pair H, L:
                N = (H - 0xD800) * 0x0400 + (L - 0xDC00) + 0x10000

    :param s: Character or surrogate pair to convert.
    :return: Unicode scalar value.
    """
    if len(s) == 1:
        return ord(s)
    elif len(s) == 2:
        return (ord(s[0]) - 0xD800) * 0x0400 + (ord(s[1]) - 0xDC00) + 0x10000
    else:
        raise ValueError
Helper function for converting a character or surrogate pair into a Unicode scalar value e.g. "\ud800\udc00" -> 0x10000 The algorithm can be found in older versions of the Unicode Standard. https://unicode.org/versions/Unicode3.0.0/ch03.pdf, Section 3.7, D28 Unicode scalar value: a number N from 0 to 0x10FFFF is defined by applying the following algorithm to a character sequence S: If S is a single, non-surrogate value U: N = U If S is a surrogate pair H, L: N = (H - 0xD800) * 0x0400 + (L - 0xDC00) + 0x10000 :param s: :return:
entailment
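A worked instance of the surrogate-pair formula, using the pair for U+1E900 mentioned earlier in this section:

# N = (H - 0xD800) * 0x0400 + (L - 0xDC00) + 0x10000
H, L = 0xD83A, 0xDD00  # the surrogate pair encoding U+1E900
N = (H - 0xD800) * 0x0400 + (L - 0xDC00) + 0x10000
print(hex(N))  # 0x1e900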
def _get_nr_prefix(i):
    """
    Helper function for looking up the derived name prefix associated with a Unicode scalar value.

    :param i: Unicode scalar value.
    :return: String with the derived name prefix.
    """
    for lookup_range, prefix_string in _nr_prefix_strings.items():
        if i in lookup_range:
            return prefix_string
    raise ValueError("No prefix string associated with {0}!".format(i))
Helper function for looking up the derived name prefix associated with a Unicode scalar value. :param i: Unicode scalar value. :return: String with the derived name prefix.
entailment
def casefold(s, fullcasefold=True, useturkicmapping=False):
    """
    Function for performing case folding. This function will take the input string s and return a copy of the
    string suitable for caseless comparisons. The input string must be of type 'unicode', otherwise a TypeError
    will be raised.

    For more information on case folding, see section 3.13 of the Unicode Standard. See also the following FAQ
    on the Unicode website:

        https://unicode.org/faq/casemap_charprop.htm

    By default, full case folding (where the string length may change) is done. It is possible to use simple
    case folding (single character mappings only) by setting the boolean parameter fullcasefold=False.

    By default, case folding does not handle the Turkic case of dotted vs dotless 'i'. To perform case folding
    using the special Turkic mappings, pass the boolean parameter useturkicmapping=True. For more info on the
    dotted vs dotless 'i', see the following web pages:

        https://en.wikipedia.org/wiki/Dotted_and_dotless_I
        http://www.i18nguy.com/unicode/turkish-i18n.html#problem

    :param s: String to transform
    :param fullcasefold: Boolean indicating if a full case fold (default is True) should be done. If False, a
        simple case fold will be performed.
    :param useturkicmapping: Boolean indicating if the special turkic mapping (default is False) for the dotted
        and dotless 'i' should be used.
    :return: Copy of string that has been transformed for caseless comparison.
    """
    if not isinstance(s, six.text_type):
        raise TypeError(u"String to casefold must be of type 'unicode'!")
    lookup_order = "CF"
    if not fullcasefold:
        lookup_order = "CS"
    if useturkicmapping:
        lookup_order = "T" + lookup_order
    return u"".join([casefold_map.lookup(c, lookup_order=lookup_order) for c in preservesurrogates(s)])
Function for performing case folding. This function will take the input string s and return a copy of the string suitable for caseless comparisons. The input string must be of type 'unicode', otherwise a TypeError will be raised. For more information on case folding, see section 3.13 of the Unicode Standard. See also the following FAQ on the Unicode website: https://unicode.org/faq/casemap_charprop.htm By default, full case folding (where the string length may change) is done. It is possible to use simple case folding (single character mappings only) by setting the boolean parameter fullcasefold=False. By default, case folding does not handle the Turkic case of dotted vs dotless 'i'. To perform case folding using the special Turkic mappings, pass the boolean parameter useturkicmapping=True. For more info on the dotted vs dotless 'i', see the following web pages: https://en.wikipedia.org/wiki/Dotted_and_dotless_I http://www.i18nguy.com/unicode/turkish-i18n.html#problem :param s: String to transform :param fullcasefold: Boolean indicating if a full case fold (default is True) should be done. If False, a simple case fold will be performed. :param useturkicmapping: Boolean indicating if the special turkic mapping (default is False) for the dotted and dotless 'i' should be used. :return: Copy of string that has been transformed for caseless comparison.
entailment
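Python 3's built-in str.casefold() applies the same full case folding, and illustrates why folding rather than lowercasing is needed for caseless comparison:

s1, s2 = "straße", "STRASSE"
print(s1.lower() == s2.lower())        # False: 'ß'.lower() is still 'ß'
print(s1.casefold() == s2.casefold())  # True:  'ß' folds to 'ss'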
def _build_unicode_character_database(self):
    """
    Function for parsing the Unicode character data from the Unicode Character Database (UCD) and generating
    a lookup table. For more info on the UCD, see the following website: https://www.unicode.org/ucd/
    """
    filename = "UnicodeData.txt"
    current_dir = os.path.abspath(os.path.dirname(__file__))
    tag = re.compile(r"<\w+?>")
    with codecs.open(os.path.join(current_dir, filename), mode="r", encoding="utf-8") as fp:
        for line in fp:
            if not line.strip():
                continue
            data = line.strip().split(";")
            # Replace the start/end range markers with their proper derived names.
            if data[1].endswith((u"First>", u"Last>")) and _is_derived(int(data[0], 16)):
                data[1] = _get_nr_prefix(int(data[0], 16))
                if data[1].startswith("HANGUL SYLLABLE"):
                    # For Hangul syllables, use naming rule NR1
                    data[1] += _get_hangul_syllable_name(int(data[0], 16))
                else:
                    # Others should use naming rule NR2
                    data[1] += data[0]
            data[3] = int(data[3])  # Convert the Canonical Combining Class value into an int.
            if data[5]:
                # Convert the contents of the decomposition into characters, preserving tag info.
                data[5] = u" ".join([_hexstr_to_unichr(s) if not tag.match(s) else s for s in data[5].split()])
            for i in [6, 7, 8]:
                # Convert the decimal, digit and numeric fields to either ints or fractions.
                if data[i]:
                    if "/" in data[i]:
                        data[i] = Fraction(data[i])
                    else:
                        data[i] = int(data[i])
            for i in [12, 13, 14]:
                # Convert the uppercase, lowercase and titlecase fields to characters.
                if data[i]:
                    data[i] = _hexstr_to_unichr(data[i])
            lookup_name = _uax44lm2transform(data[1])
            uc_data = UnicodeCharacter(u"U+" + data[0], *data[1:])
            self._unicode_character_database[int(data[0], 16)] = uc_data
            self._name_database[lookup_name] = uc_data
    # Fill out the "compressed" ranges of UnicodeData.txt i.e. fill out the remaining characters per the Name
    # Derivation Rules. See the Unicode Standard, ch. 4, section 4.8, Unicode Name Property
    for lookup_range, prefix_string in _nr_prefix_strings.items():
        exemplar = self._unicode_character_database.__getitem__(lookup_range[0])
        for item in lookup_range:
            hex_code = _padded_hex(item)
            new_name = prefix_string
            if prefix_string.startswith("HANGUL SYLLABLE"):
                # For Hangul, use naming rule NR1
                new_name += _get_hangul_syllable_name(item)
            else:
                # Everything else uses naming rule NR2
                new_name += hex_code
            uc_data = exemplar._replace(code=u"U+" + hex_code, name=new_name)
            self._unicode_character_database[item] = uc_data
            self._name_database[_uax44lm2transform(new_name)] = uc_data
Function for parsing the Unicode character data from the Unicode Character Database (UCD) and generating a lookup table. For more info on the UCD, see the following website: https://www.unicode.org/ucd/
entailment
def lookup_by_name(self, name):
    """
    Function for retrieving the UnicodeCharacter associated with a name. The name lookup uses the loose
    matching rule UAX44-LM2 for loose matching. See the following for more info:

        https://www.unicode.org/reports/tr44/#UAX44-LM2

    For example:

        ucd = UnicodeData()
        ucd.lookup_by_name("LATIN SMALL LETTER SHARP S") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...)
        ucd.lookup_by_name("latin_small_letter_sharp_s") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...)

    :param name: Name of the character to look up.
    :return: UnicodeCharacter instance with data associated with the character.
    """
    try:
        return self._name_database[_uax44lm2transform(name)]
    except KeyError:
        raise KeyError(u"Unknown character name: '{0}'!".format(name))
Function for retrieving the UnicodeCharacter associated with a name. The name lookup uses the loose matching rule UAX44-LM2 for loose matching. See the following for more info: https://www.unicode.org/reports/tr44/#UAX44-LM2 For example: ucd = UnicodeData() ucd.lookup_by_name("LATIN SMALL LETTER SHARP S") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...) ucd.lookup_by_name("latin_small_letter_sharp_s") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...) :param name: Name of the character to look up. :return: UnicodeCharacter instance with data associated with the character.
entailment
def lookup_by_partial_name(self, partial_name):
    """
    Similar to lookup_by_name(name), this method uses loose matching rule UAX44-LM2 to attempt to find the
    UnicodeCharacter associated with a name. However, it attempts to permit even looser matching by doing a
    substring search instead of a simple match. This method will return a generator that yields instances of
    UnicodeCharacter where the partial_name passed in is a substring of the full name.

    For example:

        >>> ucd = UnicodeData()
        >>> for data in ucd.lookup_by_partial_name("SHARP S"):
        >>>     print(data.code + " " + data.name)
        >>>
        >>> U+00DF LATIN SMALL LETTER SHARP S
        >>> U+1E9E LATIN CAPITAL LETTER SHARP S
        >>> U+266F MUSIC SHARP SIGN

    :param partial_name: Partial name of the character to look up.
    :return: Generator that yields instances of UnicodeCharacter.
    """
    for k, v in self._name_database.items():
        if _uax44lm2transform(partial_name) in k:
            yield v
Similar to lookup_by_name(name), this method uses loose matching rule UAX44-LM2 to attempt to find the UnicodeCharacter associated with a name. However, it attempts to permit even looser matching by doing a substring search instead of a simple match. This method will return a generator that yields instances of UnicodeCharacter where the partial_name passed in is a substring of the full name. For example:
>>> ucd = UnicodeData()
>>> for data in ucd.lookup_by_partial_name("SHARP S"):
...     print(data.code + " " + data.name)
...
U+00DF LATIN SMALL LETTER SHARP S
U+1E9E LATIN CAPITAL LETTER SHARP S
U+266F MUSIC SHARP SIGN
:param partial_name: Partial name of the character to look up. :return: Generator that yields instances of UnicodeCharacter.
entailment
def _load_unicode_block_info(self): """ Function for parsing the Unicode block info from the Unicode Character Database (UCD) and generating a lookup table. For more info on the UCD, see the following website: https://www.unicode.org/ucd/ """ filename = "Blocks.txt" current_dir = os.path.abspath(os.path.dirname(__file__)) with codecs.open(os.path.join(current_dir, filename), mode="r", encoding="utf-8") as fp: for line in fp: if not line.strip() or line.startswith("#"): continue # Skip empty lines or lines that are comments (comments start with '#') # Format: Start Code..End Code; Block Name block_range, block_name = line.strip().split(";") start_range, end_range = block_range.strip().split("..") self._unicode_blocks[six.moves.range(int(start_range, 16), int(end_range, 16) + 1)] = block_name.strip()
Function for parsing the Unicode block info from the Unicode Character Database (UCD) and generating a lookup table. For more info on the UCD, see the following website: https://www.unicode.org/ucd/
entailment
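Since _unicode_blocks is keyed by range objects, resolving the block for a code point is a membership scan over the keys rather than a direct index; a minimal sketch (the helper name is assumed):

def block_for(codepoint, unicode_blocks):
    # Scan the range keys; `in` on a range is a constant-time test in Python 3.
    for block_range, block_name in unicode_blocks.items():
        if codepoint in block_range:
            return block_name
    return None

# block_for(0x41, ucd._unicode_blocks) -> 'Basic Latin' (block 0000..007F)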
def _build_casefold_map(self): """ Function for parsing the case folding data from the Unicode Character Database (UCD) and generating a lookup table. For more info on the UCD, see the following website: https://www.unicode.org/ucd/ """ self._casefold_map = defaultdict(dict) filename = "CaseFolding.txt" current_dir = os.path.abspath(os.path.dirname(__file__)) with codecs.open(os.path.join(current_dir, filename), mode="r", encoding="utf-8") as fp: for line in fp: if not line.strip() or line.startswith("#"): continue # Skip empty lines or lines that are comments (comments start with '#') code, status, mapping, name = line.split(";") src = _hexstr_to_unichr(code) target = u"".join([_hexstr_to_unichr(c) for c in mapping.strip().split()]) self._casefold_map[status.strip()][src] = target
Function for parsing the case folding data from the Unicode Character Database (UCD) and generating a lookup table. For more info on the UCD, see the following website: https://www.unicode.org/ucd/
entailment
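For reference, this is the record shape the parser above expects ('code; status; mapping; # name'); the sharp-s entry really does appear in CaseFolding.txt in this form:

line = u"00DF; F; 0073 0073; # LATIN SMALL LETTER SHARP S"
code, status, mapping, name = line.split(";")
# After parsing: _casefold_map['F'][u'\u00df'] == u'ss'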
def lookup(self, c, lookup_order="CF"):
    """
    Function to look up a character in the casefold map. The casefold map has four sub-tables: the 'C' or
    common table, the 'F' or full table, the 'S' or simple table and the 'T' or the Turkic special case
    table. These tables correspond to the statuses defined in the CaseFolding.txt file. We can specify the
    order of the tables to use for performing the lookup with the lookup_order parameter. Per the usage
    specified in the CaseFolding.txt file, we can use the 'C' and 'S' tables for doing a simple case fold.
    To perform a full case fold, we can use the 'C' and 'F' tables. The default behavior for this function
    is a full case fold (lookup_order="CF").

    :param c: character to look up
    :param lookup_order: string of sub-table statuses to consult, in order (e.g. "CF" or "CS")
    :return: the case-folded mapping for c, or c itself if no mapping exists
    """
    if not isinstance(c, six.text_type):
        raise TypeError(u"Character to lookup must be of type 'unicode'!")
    for d in lookup_order:
        try:
            return self._casefold_map[d][c]
        except KeyError:
            pass
    return c
Function to look up a character in the casefold map. The casefold map has four sub-tables: the 'C' or common table, the 'F' or full table, the 'S' or simple table and the 'T' or the Turkic special case table. These tables correspond to the statuses defined in the CaseFolding.txt file. We can specify the order of the tables to use for performing the lookup with the lookup_order parameter. Per the usage specified in the CaseFolding.txt file, we can use the 'C' and 'S' tables for doing a simple case fold. To perform a full case fold, we can use the 'C' and 'F' tables. The default behavior for this function is a full case fold (lookup_order="CF"). :param c: character to look up :param lookup_order: string of sub-table statuses to consult, in order (e.g. "CF" or "CS") :return: the case-folded mapping for c, or c itself if no mapping exists
entailment
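A hedged usage sketch (cf stands in for whatever object exposes lookup(); the mappings shown follow CaseFolding.txt):

cf.lookup(u'A')                          # 'C' table maps U+0041 -> u'a'
cf.lookup(u'\u00df')                     # default "CF": full fold of sharp s -> u'ss'
cf.lookup(u'\u00df', lookup_order="CS")  # simple fold: no S entry, so u'\u00df' is returned unchanged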
def load_from_json(data):
    """
    Load a :class:`RegistryResponse` from a dictionary or a string (that will be parsed as json).
    """
    if isinstance(data, str):
        data = json.loads(data)
    applications = [
        ApplicationResponse.load_from_json(a)
        for a in data['applications']
    ] if data['applications'] is not None else []
    return RegistryResponse(
        data['query_uri'],
        data['success'],
        data['has_references'],
        data['count'],
        applications
    )
Load a :class:`RegistryResponse` from a dictionary or a string (that will be parsed as json).
entailment
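A minimal round-trip sketch; the JSON shape is inferred from the keys this loader reads, and the values are made up:

payload = '''{
    "query_uri": "http://id.example.org/foo/1",
    "success": true,
    "has_references": false,
    "count": 0,
    "applications": []
}'''
response = RegistryResponse.load_from_json(payload)
# response.count == 0, response.applications == []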
def load_from_json(data): """ Load a :class:`ApplicationResponse` from a dictionary or string (that will be parsed as json). """ if isinstance(data, str): data = json.loads(data) items = [Item.load_from_json(a) for a in data['items']] if data['items'] is not None else [] return ApplicationResponse( data['title'], data['uri'], data['service_url'], data['success'], data['has_references'], data['count'], items )
Load a :class:`ApplicationResponse` from a dictionary or string (that will be parsed as json).
entailment
def load_from_json(data):
    """
    Load a :class:`Item` from a dictionary or string (that will be parsed as json).
    """
    if isinstance(data, str):
        data = json.loads(data)
    return Item(data['title'], data['uri'])
Load a :class:`Item` from a dictionary or string (that will be parsed as json).
entailment
def apply_T11(word):
    '''If a VVV sequence contains a /u, y/-final diphthong and the third vowel is /i/, there is a syllable
    boundary between the diphthong and /i/.'''
    WORD = word
    offset = 0
    for vvv in t11_vvv_sequences(WORD):
        i = vvv.start(1) + (1 if vvv.group(1)[-1] in 'uy' else 2) + offset
        WORD = WORD[:i] + '.' + WORD[i:]
        offset += 1
    RULE = ' T11' if word != WORD else ''
    return WORD, RULE
If a VVV sequence contains a /u, y/-final diphthong and the third vowel is /i/, there is a syllable boundary between the diphthong and /i/.
entailment
def apply_T12(word):
    '''There is a syllable boundary within a VV sequence of two nonidentical vowels that are not a genuine
    diphthong, e.g., [ta.e], [ko.et.taa].'''
    WORD = word
    offset = 0
    for vv in new_vv(WORD):
        seq = vv.group(1)
        if not is_diphthong(seq) and not is_long(seq):
            i = vv.start(1) + 1 + offset
            WORD = WORD[:i] + '.' + WORD[i:]
            offset += 1
    RULE = ' T12' if word != WORD else ''
    return WORD, RULE
There is a syllable boundary within a VV sequence of two nonidentical vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].
entailment
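The offset bookkeeping shared by these rules exists because match positions are computed against the original string while dots are inserted into a growing copy; a stripped-down sketch of the pattern:

import re

word = 'koettaa'
WORD = word
offset = 0
for vv in re.finditer(r'([aeiouy]{2})', word):  # positions refer to the unmodified `word`
    i = vv.start(1) + 1 + offset                # shift by the dots already inserted
    WORD = WORD[:i] + '.' + WORD[i:]
    offset += 1
# WORD == 'ko.etta.a' -- the real rules additionally skip diphthongs and long vowels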
def _syllabify_simplex(word): '''Syllabify the given word.''' word, rules = apply_T1(word) if re.search(r'[^ieAyOauo]*([ieAyOauo]{2})[^ieAyOauo]*', word): word, T2 = apply_T2(word) word, T8 = apply_T8(word) word, T9 = apply_T9(word) rules += T2 + T8 + T9 # T4 produces variation syllabifications = list(apply_T4(word)) else: syllabifications = [(word, ''), ] for word, rule in syllabifications: RULES = rules + rule if re.search(r'[ieAyOauo]{3}', word): word, T6 = apply_T6(word) word, T5 = apply_T5(word) word, T10 = apply_T10(word) word, T7 = apply_T7(word) word, T2 = apply_T2(word) RULES += T5 + T6 + T10 + T7 + T2 RULES = RULES or ' T0' # T0 means no rules have applied yield word, RULES
Syllabify the given word.
entailment
def apply_T9(word): '''Split /iu/ sequences that do not appear in the first or second syllables. Split /iu/ sequences in the final syllable iff the final syllable would receive stress.''' WORD = word index = 0 offset = 0 for iu in iu_sequences(WORD): if iu.start(1) != index: i = iu.start(1) + 1 + offset WORD = WORD[:i] + '.' + WORD[i:] index = iu.start(1) offset += 1 # split any /iu/ sequence in the final syllable iff the final syllable # would receive stress -- to capture extrametricality if WORD.count('.') % 2 == 0: iu = iu_sequences(WORD, word_final=True) if iu: i = iu.start(1) + 1 WORD = WORD[:i] + '.' + WORD[i:] RULE = ' T9' if word != WORD else '' return WORD, RULE
Split /iu/ sequences that do not appear in the first or second syllables. Split /iu/ sequences in the final syllable iff the final syllable would receive stress.
entailment
def apply_T10(word): '''Any /iou/ sequence contains a syllable boundary between the first and second vowel.''' WORD = word offset = 0 for iou in iou_sequences(WORD): i = iou.start(1) + 1 + offset WORD = WORD[:i] + '.' + WORD[i:] offset += 1 RULE = ' T10' if word != WORD else '' return WORD, RULE
Any /iou/ sequence contains a syllable boundary between the first and second vowel.
entailment
def T1(word): '''Insert a syllable boundary in front of every CV sequence.''' # split consonants and vowels: 'balloon' -> ['b', 'a', 'll', 'oo', 'n'] WORD = [i for i in re.split(r'([ieaouäöy]+)', word, flags=FLAGS) if i] # keep track of which sub-rules are applying sub_rules = set() # a count divisible by 2 indicates an even syllable count = 1 for i, v in enumerate(WORD): # T1B # If there is a consonant cluster word-initially, the entire cluster # forms the onset of the first syllable: # CCV > #CCV if i == 0 and phon.is_consonant(v[0]): sub_rules.add('b') elif phon.is_consonant(v[0]): count += 1 # True if the current syllable is unstressed, else False unstressed = count % 2 == 0 # T1C # If there is a consonant cluster word-finally, the entire cluster # forms the coda of the final syllable: # VCC# > VCC# if i + 1 == len(WORD): sub_rules.add('c') # T1D # If there is a bare "Finnish" consonant cluster word-medially and # the previous syllable receives stress, the first consonant of the # cluster forms the coda of the previous syllable (to create a # heavy syllable); otherwise, the whole cluster forms the onset of # the current syllable (this is the /kr/ rule): # 'VCCV > 'VC.CV, VCCV > V.CCV elif phon.is_cluster(v): sub_rules.add('d') WORD[i] = v[0] + '.' + v[1:] if unstressed else '.' + v elif phon.is_cluster(v[1:]): # T1E (optional) # If there is a word-medial "Finnish" consonant cluster that is # preceded by a sonorant consonant, if the previous syllable # receives stress, the sonorant consonant and the first # consonant of the cluster form the coda of the previous # syllable, and the remainder of the cluster forms the onset of # the current syllable: # 'VlCC > VlC.C if phon.is_sonorant(v[0]) and unstressed: sub_rules.add('e') WORD[i] = v[:2] + '.' + v[2:] # T1F # If there is a word-medial "Finnish" cluster that follows a # consonant, that first consonant forms the coda of the # previous syllable, and the cluster forms the onset of the # current syllable: # VCkr > VC.kr else: sub_rules.add('f') WORD[i] = v[0] + '.' + v[1:] # T1A # There is a syllable boundary in front of every CV sequence: # VCV > V.CV, CCV > C.CV else: WORD[i] = v[:-1] + '.' + v[-1] sub_rules.add('a') WORD = ''.join(WORD) rules = '' if word == WORD else ' T1' # + ''.join(sub_rules) # TODO: sort return WORD, rules
Insert a syllable boundary in front of every CV sequence.
entailment
def T2(word, rules): '''Split any VV sequence that is not a genuine diphthong or long vowel. E.g., [ta.e], [ko.et.taa]. This rule can apply within VVV+ sequences.''' WORD = word offset = 0 for vv in vv_sequences(WORD): seq = vv.group(1) if not phon.is_diphthong(seq) and not phon.is_long(seq): i = vv.start(1) + 1 + offset WORD = WORD[:i] + '.' + WORD[i:] offset += 1 rules += ' T2' if word != WORD else '' return WORD, rules
Split any VV sequence that is not a genuine diphthong or long vowel. E.g., [ta.e], [ko.et.taa]. This rule can apply within VVV+ sequences.
entailment
def T4(word, rules):
    '''Optionally split /u,y/-final diphthongs that do not take primary stress. E.g., [lau.ka.us],
    [va.ka.ut.taa].'''
    WORD = re.split(
        r'([ieaouäöy]+[^ieaouäöy]+\.*[ieaoäö]{1}(?:u|y)(?:\.*[^ieaouäöy]+|$))',  # noqa
        word, flags=re.I | re.U)
    PARTS = [[] for _ in WORD]
    for i, v in enumerate(WORD):
        if i != 0:
            vv = u_y_final_diphthongs(v)
            if vv:
                I = vv.start(1) + 1
                PARTS[i].append(v[:I] + '.' + v[I:])
        # include original form (non-application of rule)
        PARTS[i].append(v)
    WORDS = list(product(*PARTS))
    for WORD in WORDS:
        WORD = ''.join(WORD)
        RULES = rules + ' T4' if word != WORD else rules
        yield WORD, RULES
Optionally split /u,y/-final diphthongs that do not take primary stress. E.g., [lau.ka.us], [va.ka.ut.taa].
entailment
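The itertools.product call is what makes T4 optional: every splittable part contributes both its split and unsplit form, and each combination becomes a candidate syllabification; a toy sketch:

from itertools import product

PARTS = [['lau'], ['ka.us', 'kaus']]  # split and unsplit variants of one matched part
candidates = [''.join(p) for p in product(*PARTS)]
# candidates == ['lauka.us', 'laukaus']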
def T6(word, rules): '''If a VVV-sequence contains a long vowel, insert a syllable boundary between it and the third vowel. E.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].''' offset = 0 try: WORD, rest = tuple(word.split('.', 1)) for vvv in long_vowel_sequences(rest): i = vvv.start(2) vvv = vvv.group(2) i += (2 if phon.is_long(vvv[:2]) else 1) + offset rest = rest[:i] + '.' + rest[i:] offset += 1 except ValueError: WORD = word for vvv in long_vowel_sequences(WORD): i = vvv.start(2) + 2 WORD = WORD[:i] + '.' + WORD[i:] try: WORD += '.' + rest except UnboundLocalError: pass rules += ' T6' if word != WORD else '' return WORD, rules
If a VVV-sequence contains a long vowel, insert a syllable boundary between it and the third vowel. E.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].
entailment
def T8(word, rules):
    '''Join /ie/, /uo/, or /yö/ sequences in syllables that take primary stress.'''
    WORD = word
    try:
        vv = tail_diphthongs(WORD)
        i = vv.start(1) + 1
        WORD = WORD[:i] + WORD[i + 1:]
    except AttributeError:
        pass
    rules += ' T8' if word != WORD else ''
    return WORD, rules
Join /ie/, /uo/, or /yö/ sequences in syllables that take primary stress.
entailment
def T11(word, rules): '''If a VVV sequence contains a /u,y/-final diphthong, insert a syllable boundary between the diphthong and the third vowel.''' WORD = word offset = 0 for vvv in precedence_sequences(WORD): i = vvv.start(1) + (1 if vvv.group(1)[-1] in 'uyUY' else 2) + offset WORD = WORD[:i] + '.' + WORD[i:] offset += 1 rules += ' T11' if word != WORD else '' return WORD, rules
If a VVV sequence contains a /u,y/-final diphthong, insert a syllable boundary between the diphthong and the third vowel.
entailment
def pk_prom(word): '''Return the number of stressed light syllables.''' violations = 0 stressed = [] for w in extract_words(word): stressed += w.split('.')[2:-1:2] # odd syllables, excl. word-initial # (CVV = light) for syll in stressed: if phon.is_vowel(syll[-1]): violations += 1 # # (CVV = heavy) # for syll in stressed: # if re.search( # ur'^[^ieaouäöy]*[ieaouäöy]{1}$', syll, flags=re.I | re.U): # violations += 1 return violations
Return the number of stressed light syllables.
entailment
def rank(syllabifications):
    '''Rank syllabifications.'''
    syllabifications.sort(key=lambda s: wsp(s[0]) + pk_prom(s[0]) + nuc(s[0]))
    return syllabifications
Rank syllabifications.
entailment
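Ranking is a plain stable sort on the summed violation counts, so the least-penalized candidate ends up first; a toy sketch with a stand-in scorer:

def penalty(word):
    return word.count('.')  # stand-in for wsp(word) + pk_prom(word) + nuc(word)

candidates = [('ka.la.s.sa', ''), ('ka.las.sa', '')]
candidates.sort(key=lambda s: penalty(s[0]))
# candidates[0] is ('ka.las.sa', ''), the lower-penalty syllabification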
def ansi_format_iter( self, x_start=0, y_start=0, width=None, height=None, frame=0, columns=1, downsample=1 ): """Return the ANSI escape sequence to render the image. x_start Offset from the left of the image data to render from. Defaults to 0. y_start Offset from the top of the image data to render from. Defaults to 0. width Width of the image data to render. Defaults to the image width. height Height of the image data to render. Defaults to the image height. frame Single frame number, or a list of frame numbers to render in sequence. Defaults to frame 0. columns Number of frames to render per line (useful for printing tilemaps!). Defaults to 1. downsample Shrink larger images by printing every nth pixel only. Defaults to 1. """ image = self.get_image() frames = [] frame_count = 1 if not hasattr( image, 'n_frames' ) else image.n_frames if isinstance( frame, int ): assert frame in range( 0, frame_count ) frames = [frame] else: frames = [f for f in frame if f in range( 0, frame_count )] if not width: width = image.size[0]-x_start if not height: height = image.size[1]-y_start if image.mode == 'P': palette = from_palette_bytes( image.getpalette() ) def data_fetch( x, y, fr ): if fr not in range( 0, frame_count ): return Transparent() if not ((0 <= x < image.size[0]) and (0 <= y < image.size[1])): return Transparent() image.seek( fr ) return palette[image.getpixel( (x, y) )] for x in ansi.format_image_iter( data_fetch, x_start, y_start, width, height, frames, columns, downsample ): yield x return
Return the ANSI escape sequence to render the image. x_start Offset from the left of the image data to render from. Defaults to 0. y_start Offset from the top of the image data to render from. Defaults to 0. width Width of the image data to render. Defaults to the image width. height Height of the image data to render. Defaults to the image height. frame Single frame number, or a list of frame numbers to render in sequence. Defaults to frame 0. columns Number of frames to render per line (useful for printing tilemaps!). Defaults to 1. downsample Shrink larger images by printing every nth pixel only. Defaults to 1.
entailment
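A hedged usage sketch; load_image and the assumption that the iterator yields printable ANSI strings are both hypothetical, only the keyword arguments come from the signature above:

import sys

img = load_image('sprite.png')  # hypothetical constructor for a class with this method
for chunk in img.ansi_format_iter(frame=0, downsample=2):
    sys.stdout.write(chunk)     # assumes each yielded chunk is ANSI-escaped text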
def ansi_format_iter( self, x_start=0, y_start=0, width=None, height=None, frame=0, columns=1, downsample=1, frame_index=None, frame_flip_v=0, frame_flip_h=0 ): """Return the ANSI escape sequence to render the image. x_start Offset from the left of the image data to render from. Defaults to 0. y_start Offset from the top of the image data to render from. Defaults to 0. width Width of the image data to render. Defaults to the image width. height Height of the image data to render. Defaults to the image height. frame Single frame number/object, or a list of frames to render in sequence. Defaults to frame 0. columns Number of frames to render per line (useful for printing tilemaps!). Defaults to 1. downsample Shrink larger images by printing every nth pixel only. Defaults to 1. frame_index Constant or mrc.Ref for a frame object property denoting the index. Defaults to None (i.e. frame itself should be an index). frame_flip_v Constant or mrc.Ref for a frame object property for whether to mirror vertically. Defaults to 0. frame_flip_h Constant or mrc.Ref for a frame object property for whether to mirror horizontally. Defaults to 0. """ assert x_start in range( 0, self.width ) assert y_start in range( 0, self.height ) if frame_index is not None: fn_index = lambda fr: mrc.property_get( frame_index, fr ) else: fn_index = lambda fr: fr if fr in range( 0, self.frame_count ) else None fn_flip_v = lambda fr: mrc.property_get( frame_flip_v, fr ) fn_flip_h = lambda fr: mrc.property_get( frame_flip_h, fr ) frames = [] try: frame_iter = iter( frame ) frames = [f for f in frame_iter] except TypeError: frames = [frame] if not width: width = self.width-x_start if not height: height = self.height-y_start stride = width*height def data_fetch( x, y, fr_obj ): fr = fn_index( fr_obj ) if fr is None: return Transparent() if not ((0 <= x < self.width) and (0 <= y < self.height)): return Transparent() if fn_flip_h( fr_obj ): x = self.width - x - 1 if fn_flip_v( fr_obj ): y = self.height - y - 1 index = self.width*y + x p = self.source[stride*fr+index] if self.mask: p = p if self.mask[stride*fr+index] else None return self.palette[p] if p is not None else Transparent() for x in ansi.format_image_iter( data_fetch, x_start, y_start, width, height, frames, columns, downsample ): yield x return
Return the ANSI escape sequence to render the image. x_start Offset from the left of the image data to render from. Defaults to 0. y_start Offset from the top of the image data to render from. Defaults to 0. width Width of the image data to render. Defaults to the image width. height Height of the image data to render. Defaults to the image height. frame Single frame number/object, or a list of frames to render in sequence. Defaults to frame 0. columns Number of frames to render per line (useful for printing tilemaps!). Defaults to 1. downsample Shrink larger images by printing every nth pixel only. Defaults to 1. frame_index Constant or mrc.Ref for a frame object property denoting the index. Defaults to None (i.e. frame itself should be an index). frame_flip_v Constant or mrc.Ref for a frame object property for whether to mirror vertically. Defaults to 0. frame_flip_h Constant or mrc.Ref for a frame object property for whether to mirror horizontally. Defaults to 0.
entailment
def main():
    """ Purge a single fastly url """
    parser = OptionParser(description="Purge a single url from fastly.")
    parser.add_option("-k", "--key", dest="apikey", default="",
                      help="fastly api key")
    parser.add_option("-H", "--host", dest="host", help="host to purge from")
    parser.add_option("-p", "--path", dest="path", help="path to purge")
    (options, args) = parser.parse_args()
    for val in options.__dict__.values():
        if val is None:
            print("Missing required options")
            parser.print_help()
            sys.exit(1)
    client = fastly.connect(options.apikey)
    purge = client.purge_url(options.host, options.path)
    print(purge)
Purge a single fastly url
entailment
def set_callbacks(self, **dic_functions):
    """Register callbacks needed by the interface object"""
    for action in self.interface.CALLBACKS:
        try:
            f = dic_functions[action]
        except KeyError:
            pass
        else:
            setattr(self.interface.callbacks, action, f)
    manquantes = [a for a in self.interface.CALLBACKS
                  if a not in dic_functions]
    if not manquantes:
        logging.debug(
            f"{self.__class__.__name__}: all requested callbacks are provided.")
    else:
        logging.warning(
            f"{self.__class__.__name__} didn't set requested callbacks {manquantes}")
Register callbacks needed by the interface object
entailment
def populate(self, obj=None, section=None, parse_types=True):
    """Set attributes in ``obj`` with ``setattr`` from all the values in ``section``.
    """
    section = self.default_section if section is None else section
    obj = Settings() if obj is None else obj
    is_dict = isinstance(obj, dict)
    for k, v in self.get_options(section).items():
        if parse_types:
            if v == 'None':
                v = None
            elif self.FLOAT_REGEXP.match(v):
                v = float(v)
            elif self.INT_REGEXP.match(v):
                v = int(v)
            elif self.BOOL_REGEXP.match(v):
                v = v == 'True'
            else:
                m = self.EVAL_REGEXP.match(v)
                if m:
                    evalstr = m.group(1)
                    v = eval(evalstr)
        logger.debug('setting {} => {} on {}'.format(k, v, obj))
        if is_dict:
            obj[k] = v
        else:
            setattr(obj, k, v)
    return obj
Set attributes in ``obj`` with ``setattr`` from all the values in ``section``.
entailment
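A standalone sketch of the same string-to-type coercion; the regular expressions here are assumptions standing in for the class's FLOAT_REGEXP/INT_REGEXP constants, which are not shown:

import re

FLOAT_RE = re.compile(r'^-?\d+\.\d+$')  # assumed shape of FLOAT_REGEXP
INT_RE = re.compile(r'^-?\d+$')         # assumed shape of INT_REGEXP

def coerce(v):
    if v == 'None':
        return None
    if FLOAT_RE.match(v):
        return float(v)
    if INT_RE.match(v):
        return int(v)
    if v in ('True', 'False'):
        return v == 'True'
    return v

# coerce('3.14') -> 3.14, coerce('42') -> 42, coerce('True') -> True, coerce('None') -> None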
def _get_calling_module(self): """Get the last module in the call stack that is not this module or ``None`` if the call originated from this module. """ for frame in inspect.stack(): mod = inspect.getmodule(frame[0]) logger.debug(f'calling module: {mod}') if mod is not None: mod_name = mod.__name__ if mod_name != __name__: return mod
Get the last module in the call stack that is not this module or ``None`` if the call originated from this module.
entailment
def resource_filename(self, resource_name, module_name=None):
    """Return a resource based on a file name. This uses the ``pkg_resources`` package first to find the resources.
    If it doesn't find it, it returns a path on the file system.

    :param resource_name: the file name of the resource to obtain (or name if obtained from an installed module)
    :param module_name: the name of the module to obtain the data, which defaults to ``__name__``
    :return: a path on the file system or resource of the installed module
    """
    if module_name is None:
        mod = self._get_calling_module()
        logger.debug(f'calling module: {mod}')
        if mod is not None:
            module_name = mod.__name__
    if module_name is None:
        module_name = __name__
    if pkg_resources.resource_exists(module_name, resource_name):
        res = pkg_resources.resource_filename(module_name, resource_name)
    else:
        res = resource_name
    return Path(res)
Return a resource based on a file name. This uses the ``pkg_resources`` package first to find the resources. If it doesn't find it, it returns a path on the file system. :param resource_name: the file name of the resource to obtain (or name if obtained from an installed module) :param module_name: the name of the module to obtain the data, which defaults to ``__name__`` :return: a path on the file system or resource of the installed module
entailment
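Typical use, with hypothetical names: resolve a packaged data file if the module is installed, otherwise fall back to the literal path:

path = cfg.resource_filename('data/defaults.conf', module_name='mypackage')  # names hypothetical
with open(path) as fp:  # the returned pathlib.Path works directly with open()
    text = fp.read()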
def parser(self): "Load the configuration file." if not hasattr(self, '_conf'): cfile = self.config_file logger.debug('loading config %s' % cfile) if os.path.isfile(cfile): conf = self._create_config_parser() conf.read(os.path.expanduser(cfile)) else: if self.robust: logger.debug('no default config file %s--skipping' % cfile) else: raise IOError('no such file: %s' % cfile) conf = None self._conf = conf return self._conf
Load the configuration file.
entailment
def get_options(self, section='default', opt_keys=None, vars=None): """ Get all options for a section. If ``opt_keys`` is given return only options with those keys. """ vars = vars if vars else self.default_vars conf = self.parser opts = {} if opt_keys is None: if conf is None: opt_keys = {} else: if not self.robust or conf.has_section(section): opt_keys = conf.options(section) else: opt_keys = {} else: logger.debug('conf: %s' % conf) copts = conf.options(section) if conf else {} opt_keys = set(opt_keys).intersection(set(copts)) for option in opt_keys: logger.debug(f'option: {option}, vars: {vars}') opts[option] = conf.get(section, option, vars=vars) return opts
Get all options for a section. If ``opt_keys`` is given return only options with those keys.
entailment
def get_option(self, name, section=None, vars=None, expect=None): """Return an option from ``section`` with ``name``. :param section: section in the ini file to fetch the value; defaults to constructor's ``default_section`` """ vars = vars if vars else self.default_vars if section is None: section = self.default_section opts = self.get_options(section, opt_keys=[name], vars=vars) if opts: return opts[name] else: if self._narrow_expect(expect): raise ValueError('no option \'{}\' found in section {}'. format(name, section))
Return an option from ``section`` with ``name``. :param section: section in the ini file to fetch the value; defaults to constructor's ``default_section``
entailment
def get_option_list(self, name, section=None, vars=None, expect=None, separator=','): """Just like ``get_option`` but parse as a list using ``split``. """ val = self.get_option(name, section, vars, expect) return val.split(separator) if val else []
Just like ``get_option`` but parse as a list using ``split``.
entailment
def get_option_boolean(self, name, section=None, vars=None, expect=None): """Just like ``get_option`` but parse as a boolean (any case `true`). """ val = self.get_option(name, section, vars, expect) val = val.lower() if val else 'false' return val == 'true'
Just like ``get_option`` but parse as a boolean (any case `true`).
entailment
def get_option_int(self, name, section=None, vars=None, expect=None): """Just like ``get_option`` but parse as an integer.""" val = self.get_option(name, section, vars, expect) if val: return int(val)
Just like ``get_option`` but parse as an integer.
entailment
def get_option_float(self, name, section=None, vars=None, expect=None): """Just like ``get_option`` but parse as a float.""" val = self.get_option(name, section, vars, expect) if val: return float(val)
Just like ``get_option`` but parse as a float.
entailment
def get_option_path(self, name, section=None, vars=None, expect=None):
    """Just like ``get_option`` but return a ``pathlib.Path`` object of the string.
    """
    val = self.get_option(name, section, vars, expect)
    if val is not None:
        return Path(val)
Just like ``get_option`` but return a ``pathlib.Path`` object of the string.
entailment
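Given an ini section like the one sketched below, the typed getters wrap get_option with the obvious conversions; cfg stands in for an instance of this configuration class:

# [default]
# timeout = 5
# ratio = 0.75
# verbose = true
# names = alpha,beta,gamma

cfg.get_option_int('timeout')      # -> 5
cfg.get_option_float('ratio')      # -> 0.75
cfg.get_option_boolean('verbose')  # -> True (case-insensitive match on 'true')
cfg.get_option_list('names')       # -> ['alpha', 'beta', 'gamma']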
def property_get( prop, instance, **kwargs ): """Wrapper for property reads which auto-dereferences Refs if required. prop A Ref (which gets dereferenced and returned) or any other value (which gets returned). instance The context object used to dereference the Ref. """ if isinstance( prop, Ref ): return prop.get( instance, **kwargs ) return prop
Wrapper for property reads which auto-dereferences Refs if required. prop A Ref (which gets dereferenced and returned) or any other value (which gets returned). instance The context object used to dereference the Ref.
entailment
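A minimal sketch of the dereferencing behaviour, assuming Ref('size') resolves the named attribute on the context instance (the Ref constructor's exact semantics are not shown here):

class Header:
    size = 4

header = Header()
property_get(16, header)           # plain value: returned unchanged -> 16
property_get(Ref('size'), header)  # Ref: dereferenced against header -> 4 (assumed semantics)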