sentence1 | sentence2 | label
---|---|---|
def property_set( prop, instance, value, **kwargs ):
"""Wrapper for property writes which auto-deferences Refs.
prop
A Ref (which gets dereferenced and the target value set).
instance
The context object used to dereference the Ref.
value
The value to set the property to.
Throws AttributeError if prop is not a Ref.
"""
if isinstance( prop, Ref ):
return prop.set( instance, value, **kwargs )
raise AttributeError( "can't change value of constant {} (context: {})".format( prop, instance ) ) | Wrapper for property writes which auto-dereferences Refs.
prop
A Ref (which gets dereferenced and the target value set).
instance
The context object used to dereference the Ref.
value
The value to set the property to.
Throws AttributeError if prop is not a Ref. | entailment |
def view_property( prop ):
"""Wrapper for attributes of a View class which auto-dereferences Refs.
Equivalent to setting a property on the class with the getter wrapped
with property_get(), and the setter wrapped with property_set().
prop
A string containing the name of the class attribute to wrap.
"""
def getter( self ):
return property_get( getattr( self, prop ), self.parent )
def setter( self, value ):
return property_set( getattr( self, prop ), self.parent, value )
return property( getter, setter ) | Wrapper for attributes of a View class which auto-dereferences Refs.
Equivalent to setting a property on the class with the getter wrapped
with property_get(), and the setter wrapped with property_set().
prop
A string containing the name of the class attribute to wrap. | entailment |
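A minimal, self-contained sketch of how these wrappers fit together, assuming a simplified Ref that wraps a single attribute name on the parent object (the real Ref and property_get are defined elsewhere in the package and may differ):
class Ref:
    def __init__(self, attr):
        self._attr = attr

    def get(self, instance, **kwargs):
        return getattr(instance, self._attr)

    def set(self, instance, value, **kwargs):
        setattr(instance, self._attr, value)


def property_get(prop, instance, **kwargs):
    # Dereference Refs; return plain constants unchanged
    return prop.get(instance, **kwargs) if isinstance(prop, Ref) else prop


def property_set(prop, instance, value, **kwargs):
    if isinstance(prop, Ref):
        return prop.set(instance, value, **kwargs)
    raise AttributeError("can't change value of constant {} (context: {})".format(prop, instance))


def view_property(prop):
    def getter(self):
        return property_get(getattr(self, prop), self.parent)

    def setter(self, value):
        return property_set(getattr(self, prop), self.parent, value)

    return property(getter, setter)


class Model:
    width = 640


class WidthView:
    _width_ref = Ref('width')             # a Ref into the parent object
    width = view_property('_width_ref')   # auto-dereferencing attribute

    def __init__(self, parent):
        self.parent = parent


view = WidthView(Model())
print(view.width)          # 640 - read through the Ref
view.width = 800           # written back onto the parent via Ref.set()
print(view.parent.width)   # 800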
def get( self, instance, **kwargs ):
"""Return an attribute from an object using the Ref path.
instance
The object instance to traverse.
"""
target = instance
for attr in self._path:
target = getattr( target, attr )
return target | Return an attribute from an object using the Ref path.
instance
The object instance to traverse. | entailment |
def set( self, instance, value, **kwargs ):
"""Set an attribute on an object using the Ref path.
instance
The object instance to traverse.
value
The value to set.
Throws AttributeError if allow_write is False.
"""
if not self._allow_write:
raise AttributeError( "can't set Ref directly, allow_write is disabled" )
target = instance
for attr in self._path[:-1]:
target = getattr( target, attr )
setattr( target, self._path[-1], value )
return | Set an attribute on an object using the Ref path.
instance
The object instance to traverse.
value
The value to set.
Throws AttributeError if allow_write is False. | entailment |
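The traversal in get() and set() is just a chained getattr/setattr along the path; a standalone equivalent, assuming a dotted path string rather than a Ref object, might look like this:
from functools import reduce

def deep_get(obj, path):
    # 'a.b.c' -> getattr(getattr(getattr(obj, 'a'), 'b'), 'c')
    return reduce(getattr, path.split('.'), obj)

def deep_set(obj, path, value):
    *parents, last = path.split('.')
    setattr(reduce(getattr, parents, obj), last, value)

class Node:
    pass

root = Node()
root.child = Node()
root.child.name = 'leaf'
print(deep_get(root, 'child.name'))   # leaf
deep_set(root, 'child.name', 'renamed')
print(root.child.name)                # renamed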
def traverse_until_fixpoint(predicate, tree):
"""Traverses the tree again and again until it is not modified."""
old_tree = None
tree = simplify(tree)
while tree and old_tree != tree:
old_tree = tree
tree = tree.traverse(predicate)
if not tree:
return None
tree = simplify(tree)
return tree | Traverses the tree again and again until it is not modified. | entailment |
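A toy illustration of the fixpoint idea, assuming traverse_until_fixpoint as defined above is in scope. The Tree, simplify and rewrite rule below are stand-ins invented for this sketch; the real tree class is not part of this excerpt:
class Tree:
    def __init__(self, value, children=()):
        self.value = value
        self.children = list(children)

    def traverse(self, predicate):
        # Rebuild the tree bottom-up, applying the rewrite to every node
        children = [c.traverse(predicate) for c in self.children]
        return predicate(Tree(self.value, [c for c in children if c]))

    def __eq__(self, other):
        return (isinstance(other, Tree) and self.value == other.value
                and self.children == other.children)

    def __repr__(self):
        return 'Tree({}, {})'.format(self.value, self.children)


def simplify(tree):
    # No-op stand-in for the real simplifier
    return tree


def halve(node):
    # Rewrite rule: halve any value greater than 1
    if node.value > 1:
        node.value //= 2
    return node


print(traverse_until_fixpoint(halve, Tree(8, [Tree(5)])))
# Tree(1, [Tree(1, [])]) after three rewriting passes (8 -> 4 -> 2 -> 1)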
def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
# Initialise the GenObject
for sample in self.runmetadata.samples:
setattr(sample, self.analysistype, GenObject())
try:
sample[self.analysistype].pointfindergenus = self.pointfinder_org_dict[sample.general.referencegenus]
except KeyError:
sample[self.analysistype].pointfindergenus = 'ND'
# Run the raw read mapping
PointSipping(inputobject=self,
cutoff=self.cutoff)
# Create FASTA files from the raw read matches
self.fasta()
# Run PointFinder on the FASTA files
self.run_pointfinder()
# Create summary reports of the PointFinder outputs
self.parse_pointfinder() | Run the necessary methods in the correct order | entailment |
def fasta(self):
"""
Create FASTA files of the PointFinder results to be fed into PointFinder
"""
logging.info('Extracting FASTA sequences matching PointFinder database')
for sample in self.runmetadata.samples:
# Ensure that there are sequence data to extract from the GenObject
if GenObject.isattr(sample[self.analysistype], 'sequences'):
# Set the name of the FASTA file
sample[self.analysistype].pointfinderfasta = \
os.path.join(sample[self.analysistype].outputdir,
'{seqid}_pointfinder.fasta'.format(seqid=sample.name))
# Create a list to store all the SeqRecords created
sequences = list()
with open(sample[self.analysistype].pointfinderfasta, 'w') as fasta:
for gene, sequence in sample[self.analysistype].sequences.items():
# Create a SeqRecord using a Seq() of the sequence - both SeqRecord and Seq are from BioPython
seq = SeqRecord(seq=Seq(sequence),
id=gene,
name=str(),
description=str())
sequences.append(seq)
# Write all the SeqRecords to file
SeqIO.write(sequences, fasta, 'fasta') | Create FASTA files of the PointFinder results to be fed into PointFinder | entailment |
def run_pointfinder(self):
"""
Run PointFinder on the FASTA sequences extracted from the raw reads
"""
logging.info('Running PointFinder on FASTA files')
for i in range(len(self.runmetadata.samples)):
# Start threads
threads = Thread(target=self.pointfinder_threads, args=())
# Set the daemon to True so the worker threads exit when the main thread finishes
threads.setDaemon(True)
# Start the threading
threads.start()
# PointFinder requires the path to the blastn executable
blast_path = shutil.which('blastn')
for sample in self.runmetadata.samples:
# Ensure that the attribute storing the name of the FASTA file has been created
if GenObject.isattr(sample[self.analysistype], 'pointfinderfasta'):
sample[self.analysistype].pointfinder_outputs = os.path.join(sample[self.analysistype].outputdir,
'pointfinder_outputs')
# Don't run the analyses if the outputs have already been created
if not os.path.isfile(os.path.join(sample[self.analysistype].pointfinder_outputs,
'{samplename}_blastn_results.tsv'.format(samplename=sample.name))):
make_path(sample[self.analysistype].pointfinder_outputs)
# Create and run the PointFinder system call
pointfinder_cmd = \
'python -m pointfinder.PointFinder -i {input} -s {species} -p {db_path} -m blastn ' \
'-o {output_dir} -m_p {blast_path}'\
.format(input=sample[self.analysistype].pointfinderfasta,
species=sample[self.analysistype].pointfindergenus,
db_path=self.targetpath,
output_dir=sample[self.analysistype].pointfinder_outputs,
blast_path=blast_path)
self.queue.put(pointfinder_cmd)
self.queue.join() | Run PointFinder on the FASTA sequences extracted from the raw reads | entailment |
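run_pointfinder relies on a pointfinder_threads worker method and a self.queue that are not shown in this excerpt. A hedged sketch of the daemon-worker/queue pattern it presumably follows (not the project's actual implementation):
from queue import Queue
from subprocess import run
from threading import Thread

queue = Queue()

def pointfinder_threads():
    # Each daemon worker pulls a command string off the queue, runs it in a
    # shell, and marks the task done so queue.join() can eventually return
    while True:
        command = queue.get()
        run(command, shell=True)
        queue.task_done()

for _ in range(4):  # the original code starts one worker per sample
    worker = Thread(target=pointfinder_threads, daemon=True)
    worker.start()

for cmd in ['echo sample_A', 'echo sample_B']:
    queue.put(cmd)
queue.join()  # blocks until every queued command has been processed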
def populate_summary_dict(self, genus=str(), key=str()):
"""
:param genus: Non-supported genus to be added to the dictionary
:param key: section of dictionary to be populated. Supported keys are: prediction, table, and results
Populate self.summary_dict as required. If the genus is not provided, populate the dictionary for Salmonella,
Escherichia, and Campylobacter. If the genus is provided, this genus is non-standard, and an 'empty' profile
must be created for it
"""
# If the genus is not provided, generate the generic dictionary
if not genus:
# Populate the summary dict
self.summary_dict = {
'Salmonella':
{
'prediction':
{
'header': 'Strain,Colitsin,Colistin,Spectinomycin,Quinolones,\n',
'output': str(),
'summary': os.path.join(self.reportpath, 'Salmonella_prediction_summary.csv')
},
'table':
{
'header': 'Strain,parE,parC,gyrA,pmrB,pmrA,gyrB,16S_rrsD,23S,\n',
'output': str(),
'summary': os.path.join(self.reportpath, 'Salmonella_table_summary.csv')
},
'results':
{
'header': 'Strain,Genus,Mutation,NucleotideChange,AminoAcidChange,Resistance,PMID,\n',
'output': str(),
'summary': os.path.join(self.reportpath, 'PointFinder_results_summary.csv')
}
},
'Escherichia':
{
'prediction':
{
'header': 'Strain,Colistin,GentamicinC,gentamicinC,Streptomycin,Macrolide,Sulfonamide,'
'Tobramycin,Neomycin,Fluoroquinolones,Aminocoumarin,Tetracycline,KanamycinA,'
'Spectinomycin,B-lactamResistance,Paromomycin,Kasugamicin,Quinolones,G418,'
'QuinolonesAndfluoroquinolones,\n',
'output': str(),
'summary': os.path.join(self.reportpath, 'Escherichia_prediction_summary.csv')
},
'table':
{
'header': 'Strain,parE,parC,folP,gyrA,pmrB,pmrA,16S_rrsB,16S_rrsH,gyrB,ampC,'
'16S_rrsC,23S,\n',
'output': str(),
'summary': os.path.join(self.reportpath, 'Escherichia_table_summary.csv')
},
'results':
{
'header': 'Strain,Genus,Mutation,NucleotideChange,AminoAcidChange,Resistance,PMID,\n',
'output': str(),
'summary': os.path.join(self.reportpath, 'PointFinder_results_summary.csv')
}
},
'Campylobacter':
{
'prediction':
{
'header': 'Strain,LowLevelIncreaseMIC,AssociatedWithT86Mutations,Macrolide,Quinolone,'
'Streptinomycin,Erythromycin,IntermediateResistance,HighLevelResistance_'
'nalidixic_and_ciprofloxacin,\n',
'output': str(),
'summary': os.path.join(self.reportpath, 'Campylobacter_prediction_summary.csv')
},
'table':
{
'header': 'Strain,L22,rpsL,cmeR,gyrA,23S,\n',
'output': str(),
'summary': os.path.join(self.reportpath, 'Campylobacter_table_summary.csv')
},
'results':
{
'header': 'Strain,Genus,Mutation,NucleotideChange,AminoAcidChange,Resistance,PMID,\n',
'output': str(),
'summary': os.path.join(self.reportpath, 'PointFinder_results_summary.csv')
}
}
}
else:
# Create the nesting structure as required
if genus not in self.summary_dict:
self.summary_dict[genus] = dict()
if key not in self.summary_dict[genus]:
self.summary_dict[genus][key] = dict()
# The output section is the same regardless of the key
self.summary_dict[genus][key]['output'] = str()
# The results report is more generic, and contains all strains, so the header and summary are set to
# the default values required to generate this report
if key == 'results':
self.summary_dict[genus][key]['header'] = \
'Strain,Genus,Mutation,NucleotideChange,AminoAcidChange,Resistance,PMID,\n'
self.summary_dict[genus][key]['summary'] = \
os.path.join(self.reportpath, 'PointFinder_results_summary.csv')
# Create an empty header, and a report with the genus name
else:
self.summary_dict[genus][key]['header'] = 'Strain,\n'
self.summary_dict[genus][key]['summary'] = os.path.join(self.reportpath, '{genus}_{key}_summary.csv'
.format(genus=genus,
key=key))
# Remove the report if it exists, as the script will append data to this existing report
if os.path.isfile(self.summary_dict[genus][key]['summary']):
os.remove(self.summary_dict[genus][key]['summary']) | :param genus: Non-supported genus to be added to the dictionary
:param key: section of dictionary to be populated. Supported keys are: prediction, table, and results
Populate self.summary_dict as required. If the genus is not provided, populate the dictionary for Salmonella,
Escherichia, and Campylobacter. If the genus is provided, this genus is non-standard, and an 'empty' profile
must be created for it | entailment |
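For a genus outside the three built-in profiles, the else branch builds a minimal entry. Roughly equivalent standalone logic, using a hypothetical 'Listeria' genus and report path for illustration:
import os

summary_dict = {}
reportpath = '/path/to/reports'  # hypothetical
genus, key = 'Listeria', 'table'

entry = summary_dict.setdefault(genus, {}).setdefault(key, {})
entry['output'] = ''
if key == 'results':
    entry['header'] = 'Strain,Genus,Mutation,NucleotideChange,AminoAcidChange,Resistance,PMID,\n'
    entry['summary'] = os.path.join(reportpath, 'PointFinder_results_summary.csv')
else:
    # Non-standard genera get an 'empty' profile: a Strain-only header and a
    # genus-specific summary file
    entry['header'] = 'Strain,\n'
    entry['summary'] = os.path.join(reportpath, '{}_{}_summary.csv'.format(genus, key))

print(entry['summary'])  # /path/to/reports/Listeria_table_summary.csv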
def parse_pointfinder(self):
"""
Create summary reports for the PointFinder outputs
"""
# Create the nested dictionary that stores the necessary values for creating summary reports
self.populate_summary_dict()
# Clear out any previous reports
for organism in self.summary_dict:
for report in self.summary_dict[organism]:
try:
os.remove(self.summary_dict[organism][report]['summary'])
except FileNotFoundError:
pass
for sample in self.runmetadata.samples:
# Find the PointFinder outputs. If the outputs don't exist, create the appropriate entries in the
# summary dictionary as required
try:
self.summary_dict[sample.general.referencegenus]['prediction']['output'] = \
glob(os.path.join(sample[self.analysistype].pointfinder_outputs, '{seq}*prediction.txt'
.format(seq=sample.name)))[0]
except IndexError:
try:
self.summary_dict[sample.general.referencegenus]['prediction']['output'] = str()
except KeyError:
self.populate_summary_dict(genus=sample.general.referencegenus,
key='prediction')
try:
self.summary_dict[sample.general.referencegenus]['table']['output'] = \
glob(os.path.join(sample[self.analysistype].pointfinder_outputs, '{seq}*table.txt'
.format(seq=sample.name)))[0]
except IndexError:
try:
self.summary_dict[sample.general.referencegenus]['table']['output'] = str()
except KeyError:
self.populate_summary_dict(genus=sample.general.referencegenus,
key='table')
try:
self.summary_dict[sample.general.referencegenus]['results']['output'] = \
glob(os.path.join(sample[self.analysistype].pointfinder_outputs, '{seq}*results.tsv'
.format(seq=sample.name)))[0]
except IndexError:
try:
self.summary_dict[sample.general.referencegenus]['results']['output'] = str()
except KeyError:
self.populate_summary_dict(genus=sample.general.referencegenus,
key='results')
# Process the predictions
self.write_report(summary_dict=self.summary_dict,
seqid=sample.name,
genus=sample.general.referencegenus,
key='prediction')
# Process the results summary
self.write_report(summary_dict=self.summary_dict,
seqid=sample.name,
genus=sample.general.referencegenus,
key='results')
# Process the table summary
self.write_table_report(summary_dict=self.summary_dict,
seqid=sample.name,
genus=sample.general.referencegenus) | Create summary reports for the PointFinder outputs | entailment |
def write_report(summary_dict, seqid, genus, key):
"""
Parse the PointFinder outputs, and write the summary report for the current analysis type
:param summary_dict: nested dictionary containing data such as header strings, and paths to reports
:param seqid: name of the strain,
:param genus: MASH-calculated genus of current isolate
:param key: current result type. Options are 'prediction', and 'results'
"""
# Set the header string if the summary report doesn't already exist
if not os.path.isfile(summary_dict[genus][key]['summary']):
header_string = summary_dict[genus][key]['header']
else:
header_string = str()
summary_string = str()
try:
# Read in the predictions
with open(summary_dict[genus][key]['output'], 'r') as outputs:
# Skip the header
next(outputs)
for line in outputs:
# Skip empty lines
if line != '\n':
# When processing the results outputs, add the seqid to the summary string
if key == 'results':
summary_string += '{seq},{genus},'.format(seq=seqid,
genus=genus)
# Clean up the string before adding it to the summary string - replace commas
# with semi-colons, and replace tabs with commas
summary_string += line.replace(',', ';').replace('\t', ',')
# Ensure that there were results to report
if summary_string:
if not summary_string.endswith('\n'):
summary_string += '\n'
else:
if key == 'results':
summary_string += '{seq},{genus}\n'.format(seq=seqid,
genus=genus)
else:
summary_string += '{seq}\n'.format(seq=seqid)
# Write the summaries to the summary file
with open(summary_dict[genus][key]['summary'], 'a+') as summary:
# Write the header if necessary
if header_string:
summary.write(header_string)
summary.write(summary_string)
# Add the strain information If no FASTA file could be created by reference mapping
except FileNotFoundError:
# Extract the length of the header from the dictionary. Subtract two (don't need the strain, or the
# empty column created by a trailing comma)
header_len = len(summary_dict[genus][key]['header'].split(',')) - 2
# When processing the results outputs, add the seqid to the summary string
if key == 'results':
summary_string += '{seq},{genus}\n'.format(seq=seqid,
genus=genus)
# For the prediction summary, populate the summary string with the appropriate number of comma-separated
# '0' entries
elif key == 'prediction':
summary_string += '{seq}{empty}\n'.format(seq=seqid,
empty=',0' * header_len)
# Write the summaries to the summary file
with open(summary_dict[genus][key]['summary'], 'a+') as summary:
# Write the header if necessary
if header_string:
summary.write(header_string)
summary.write(summary_string) | Parse the PointFinder outputs, and write the summary report for the current analysis type
:param summary_dict: nested dictionary containing data such as header strings, and paths to reports
:param seqid: name of the strain,
:param genus: MASH-calculated genus of current isolate
:param key: current result type. Options are 'prediction', and 'results' | entailment |
def write_table_report(summary_dict, seqid, genus):
"""
Parse the PointFinder table output, and write a summary report
:param summary_dict: nested dictionary containing data such as header strings, and paths to reports
:param seqid: name of the strain,
:param genus: MASH-calculated genus of current isolate
"""
# Set the header string if the summary report doesn't already exist
if not os.path.isfile(summary_dict[genus]['table']['summary']):
header_string = summary_dict[genus]['table']['header']
else:
header_string = str()
summary_string = '{seq},'.format(seq=seqid)
try:
# Read in the predictions
with open(summary_dict[genus]['table']['output'], 'r') as outputs:
for header_value in summary_dict[genus]['table']['header'].split(',')[:-1]:
for line in outputs:
if line.startswith('{hv}\n'.format(hv=header_value)):
# Iterate through the lines following the match
for subline in outputs:
if subline != '\n':
if subline.startswith('Mutation'):
for detailline in outputs:
if detailline != '\n':
summary_string += '{},'.format(detailline.split('\t')[0])
else:
break
else:
summary_string += '{},'.format(
subline.replace(',', ';').replace('\t', ',').rstrip())
break
else:
break
break
# Reset the file iterator to the first line in preparation for the next header
outputs.seek(0)
# Ensure that there were results to report
if summary_string:
if not summary_string.endswith('\n'):
summary_string += '\n'
# Write the summaries to the summary file
with open(summary_dict[genus]['table']['summary'], 'a+') as summary:
# Write the header if necessary
if header_string:
summary.write(header_string)
summary.write(summary_string)
except FileNotFoundError:
# Write the summaries to the summary file
with open(summary_dict[genus]['table']['summary'], 'a+') as summary:
# Extract the length of the header from the dictionary. Subtract two (don't need the strain, or the
# empty column created by a trailing comma)
header_len = len(summary_dict[genus]['table']['header'].split(',')) - 2
# Populate the summary string with the appropriate number of comma-separated 'Gene not found' entries
summary_string += '{empty}\n'.format(empty='Gene not found,' * header_len)
# Write the header if necessary
if header_string:
summary.write(header_string)
summary.write(summary_string) | Parse the PointFinder table output, and write a summary report
:param summary_dict: nested dictionary containing data such as header strings, and paths to reports
:param seqid: name of the strain,
:param genus: MASH-calculated genus of current isolate | entailment |
def targets(self):
"""
Search the targets folder for FASTA files, create the multi-FASTA file of all targets if necessary, and
populate objects
"""
logging.info('Performing analysis with {} targets folder'.format(self.analysistype))
for sample in self.runmetadata:
sample[self.analysistype].runanalysis = True
sample[self.analysistype].targetpath = (os.path.join(self.targetpath,
sample[self.analysistype].pointfindergenus))
# There is a relatively strict databasing scheme necessary for the custom targets. Eventually,
# there will be a helper script to combine individual files into a properly formatted combined file
try:
sample[self.analysistype].baitfile = glob(os.path.join(sample[self.analysistype].targetpath,
'*.fasta'))[0]
# If the fasta file is missing, raise a custom error
except IndexError:
# Combine any .tfa files in the directory into a combined targets .fasta file
fsafiles = glob(os.path.join(sample[self.analysistype].targetpath, '*.fsa'))
if fsafiles:
combinetargets(fsafiles, sample[self.analysistype].targetpath)
try:
sample[self.analysistype].baitfile = glob(os.path.join(sample[self.analysistype].targetpath,
'*.fasta'))[0]
except IndexError as e:
# noinspection PyPropertyAccess
e.args = [
'Cannot find the combined fasta file in {}. Please note that the file must have a '
'.fasta extension'.format(sample[self.analysistype].targetpath)]
if os.path.isdir(sample[self.analysistype].targetpath):
raise
else:
sample[self.analysistype].runanalysis = False
for sample in self.runmetadata:
# Set the necessary attributes
sample[self.analysistype].outputdir = os.path.join(sample.run.outputdirectory, self.analysistype)
make_path(sample[self.analysistype].outputdir)
sample[self.analysistype].logout = os.path.join(sample[self.analysistype].outputdir, 'logout.txt')
sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].outputdir, 'logerr.txt')
sample[self.analysistype].baitedfastq = \
os.path.join(sample[self.analysistype].outputdir,
'{at}_targetMatches.fastq.gz'.format(at=self.analysistype)) | Search the targets folder for FASTA files, create the multi-FASTA file of all targets if necessary, and
populate objects | entailment |
def strains(self):
"""
Create a dictionary of SEQID: OLNID from the supplied strains.csv file
"""
with open(os.path.join(self.path, 'strains.csv')) as strains:
next(strains)
for line in strains:
oln, seqid = line.split(',')
self.straindict[oln] = seqid.rstrip()
self.strainset.add(oln)
logging.debug(oln)
if self.debug:
break | Create a dictionary of SEQID: OLNID from the supplied strains.csv file | entailment |
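strains() assumes a two-column CSV with a header row. A standalone version of the same parsing, with a hypothetical file layout (the column names and example IDs are assumptions):
import csv
from io import StringIO

# Hypothetical strains.csv contents: header row, then OLN ID and SEQID columns
strains_csv = StringIO('OLN,SEQID\nOLF12345,2013-SEQ-0072\nOLF67890,2014-SEQ-0276\n')

straindict = {}
strainset = set()
reader = csv.reader(strains_csv)
next(reader)  # skip the header
for oln, seqid in reader:
    straindict[oln] = seqid.strip()
    strainset.add(oln)

print(straindict)  # {'OLF12345': '2013-SEQ-0072', 'OLF67890': '2014-SEQ-0276'}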
def sequence_prep(self):
"""
Create metadata objects for all PacBio assembly FASTA files in the sequencepath.
Create individual subdirectories for each sample.
Relative symlink the original FASTA file to the appropriate subdirectory
"""
# Create a sorted list of all the FASTA files in the sequence path
strains = sorted(glob(os.path.join(self.fastapath, '*.fa*')))
for sample in strains:
# Create the object
metadata = MetadataObject()
# Set the sample name to be the file name of the sequence by removing the path and file extension
sample_name = os.path.splitext(os.path.basename(sample))[0]
if sample_name in self.strainset:
# Extract the OLNID from the dictionary using the SEQID
samplename = self.straindict[sample_name]
# samplename = sample_name
# Set and create the output directory
outputdir = os.path.join(self.path, samplename)
make_path(outputdir)
# Set the name of the JSON file
json_metadata = os.path.join(outputdir, '{name}.json'.format(name=samplename))
if not os.path.isfile(json_metadata):
# Create the name and output directory attributes
metadata.name = samplename
metadata.seqid = sample_name
metadata.outputdir = outputdir
metadata.jsonfile = json_metadata
# Set the name of the FASTA file to use in the analyses
metadata.bestassemblyfile = os.path.join(metadata.outputdir,
'{name}.fasta'.format(name=metadata.name))
# Symlink the original file to the output directory
relative_symlink(sample, outputdir, '{sn}.fasta'.format(sn=metadata.name))
# Associate the corresponding FASTQ files with the assembly
metadata.fastqfiles = sorted(glob(os.path.join(self.fastqpath,
'{name}*.gz'.format(name=metadata.name))))
metadata.forward_fastq, metadata.reverse_fastq = metadata.fastqfiles
# Write the object to file
self.write_json(metadata)
else:
metadata = self.read_json(json_metadata)
# Add the metadata object to the list of objects
self.metadata.append(metadata) | Create metadata objects for all PacBio assembly FASTA files in the sequencepath.
Create individual subdirectories for each sample.
Relative symlink the original FASTA file to the appropriate subdirectory | entailment |
def write_json(metadata):
"""
Write the metadata object to file
:param metadata: Metadata object
"""
# Open the metadata file to write
with open(metadata.jsonfile, 'w') as metadatafile:
# Write the JSON dump of the object to the metadata file
json.dump(metadata.dump(), metadatafile, sort_keys=True, indent=4, separators=(',', ': ')) | Write the metadata object to file
:param metadata: Metadata object | entailment |
def read_json(json_metadata):
"""
Read the metadata object from file
:param json_metadata: Path and file name of JSON-formatted metadata object file
:return: metadata object
"""
# Load the metadata object from the file
with open(json_metadata) as metadatareport:
jsondata = json.load(metadatareport)
# Create the metadata objects
metadata = MetadataObject()
# Initialise the metadata categories as GenObjects created using the appropriate key
for attr in jsondata:
if not isinstance(jsondata[attr], dict):
setattr(metadata, attr, jsondata[attr])
else:
setattr(metadata, attr, GenObject(jsondata[attr]))
return metadata | Read the metadata object from file
:param json_metadata: Path and file name of JSON-formatted metadata object file
:return: metadata object | entailment |
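write_json and read_json round-trip the metadata through plain JSON, rebuilding nested dictionaries as GenObjects. A sketch with simplified stand-ins for the OLCTools MetadataObject/GenObject classes (the real dump() presumably handles nesting itself):
import json

class GenObject:
    # Minimal stand-in: attribute access over a dict, plus dump()
    def __init__(self, data=None):
        self.__dict__.update(data or {})

    def dump(self):
        return self.__dict__

class MetadataObject(GenObject):
    pass

metadata = MetadataObject()
metadata.name = 'OLF12345'
metadata.run = GenObject({'outputdirectory': '/sequences/OLF12345'})
metadata.jsonfile = '/tmp/OLF12345.json'

# write_json: serialise the object, converting nested GenObjects to dicts
with open(metadata.jsonfile, 'w') as metadatafile:
    json.dump({k: v.dump() if isinstance(v, GenObject) else v
               for k, v in metadata.dump().items()},
              metadatafile, sort_keys=True, indent=4, separators=(',', ': '))

# read_json: plain values stay as-is, dicts become GenObjects again
with open(metadata.jsonfile) as metadatareport:
    jsondata = json.load(metadatareport)
restored = MetadataObject()
for attr, value in jsondata.items():
    setattr(restored, attr, GenObject(value) if isinstance(value, dict) else value)
print(restored.run.outputdirectory)  # /sequences/OLF12345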
def assembly_length(self):
"""
Use SeqIO.parse to extract the total number of bases in each assembly file
"""
for sample in self.metadata:
# Only determine the assembly length if it has not been previously calculated
if not GenObject.isattr(sample, 'assembly_length'):
# Create the assembly_length attribute, and set it to 0
sample.assembly_length = 0
for record in SeqIO.parse(sample.bestassemblyfile, 'fasta'):
# Update the assembly_length attribute with the length of the current contig
sample.assembly_length += len(record.seq)
# Write the updated object to file
self.write_json(sample) | Use SeqIO.parse to extract the total number of bases in each assembly file | entailment |
def simulate_reads(self):
"""
Use the PacBio assembly FASTA files to generate simulated reads of appropriate forward and reverse lengths
at different depths of sequencing using randomreads.sh from the bbtools suite
"""
logging.info('Read simulation')
for sample in self.metadata:
# Create the simulated_reads GenObject
sample.simulated_reads = GenObject()
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
# Create the depth GenObject
setattr(sample.simulated_reads, depth, GenObject())
# Set the depth and output directory attributes for the depth GenObject
sample.simulated_reads[depth].depth = depth
sample.simulated_reads[depth].depth_dir = os.path.join(sample.outputdir, 'simulated', depth)
# Create the output directory
make_path(sample.simulated_reads[depth].depth_dir)
# Iterate through all the desired forward and reverse read pair lengths
for read_pair in self.read_lengths:
# Create the read_pair GenObject within the depth GenObject
setattr(sample.simulated_reads[depth], read_pair, GenObject())
# Set and create the output directory
sample.simulated_reads[depth][read_pair].outputdir = \
os.path.join(sample.simulated_reads[depth].depth_dir, read_pair)
make_path(sample.simulated_reads[depth][read_pair].outputdir)
# Create both forward_reads and reverse_reads sub-GenObjects
sample.simulated_reads[depth][read_pair].forward_reads = GenObject()
sample.simulated_reads[depth][read_pair].reverse_reads = GenObject()
# Extract the forward and reverse reads lengths from the read_pair variable
sample.simulated_reads[depth][read_pair].forward_reads.length, \
sample.simulated_reads[depth][read_pair].reverse_reads.length = read_pair.split('_')
# Set the name of the forward reads - include the depth and read length information
sample.simulated_reads[depth][read_pair].forward_reads.fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'{name}_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.simulated_reads[depth][read_pair].reverse_reads.fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'{name}_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Create the trimmed output directory attribute
sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir \
= os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'simulated_trimmed')
# Set the name of the forward trimmed reads - include the depth and read length information
# This is set now, as the untrimmed files will be removed, and a check is necessary
sample.simulated_reads[depth][read_pair].forward_reads.trimmed_simulated_fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir,
'{name}_simulated_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.simulated_reads[depth][read_pair].reverse_reads.trimmed_simulated_fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir,
'{name}_simulated_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Calculate the number of reads required for the forward and reverse reads to yield the
# desired coverage depth e.g. 5Mbp genome at 20X coverage: 100Mbp in reads. 50bp forward reads
# 150bp reverse reads: forward proportion is 50 / (150 + 50) = 0.25 (and reverse is 0.75).
# Forward total reads is 25Mbp (75Mbp reverse). Number of reads required = 25Mbp / 50 bp
# 500000 reads total (same for reverse, as the reads are longer)
sample.simulated_reads[depth][read_pair].num_reads = \
int(sample.assembly_length *
int(depth) *
(int(sample.simulated_reads[depth][read_pair].forward_reads.length) /
(int(sample.simulated_reads[depth][read_pair].forward_reads.length) +
int(sample.simulated_reads[depth][read_pair].reverse_reads.length)
)
) /
int(sample.simulated_reads[depth][read_pair].forward_reads.length)
)
logging.info(
'Simulating {num_reads} paired reads for sample {name} with the following parameters:\n'
'depth {dp}, forward reads {fl}bp, and reverse reads {rl}bp'
.format(num_reads=sample.simulated_reads[depth][read_pair].num_reads,
dp=depth,
name=sample.name,
fl=sample.simulated_reads[depth][read_pair].forward_reads.length,
rl=sample.simulated_reads[depth][read_pair].reverse_reads.length))
# If the reverse read length is 0, randomreads needs different parameters (handled in the else branch below)
if sample.simulated_reads[depth][read_pair].reverse_reads.length != '0':
# Ensure that both the simulated reads, and the trimmed simulated reads files don't
# exist before simulating the reads
if not os.path.isfile(sample.simulated_reads[depth][read_pair].forward_reads.fastq) and \
not os.path.isfile(
sample.simulated_reads[depth][read_pair].forward_reads.trimmed_simulated_fastq):
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools\
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].reverse_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=True,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't',
'Xmx': self.mem}
)
else:
try:
forward_size = os.path.getsize(sample.simulated_reads[depth][read_pair]
.forward_reads.fastq)
except FileNotFoundError:
forward_size = 0
try:
reverse_size = os.path.getsize(sample.simulated_reads[depth][read_pair]
.reverse_reads.fastq)
except FileNotFoundError:
reverse_size = 0
if forward_size <= 100 or reverse_size <= 100:
try:
os.remove(sample.simulated_reads[depth][read_pair].forward_reads.fastq)
except FileNotFoundError:
pass
try:
os.remove(sample.simulated_reads[depth][read_pair].reverse_reads.fastq)
except FileNotFoundError:
pass
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools \
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].reverse_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=True,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't'}
)
else:
if not os.path.isfile(sample.simulated_reads[depth][read_pair].forward_reads.fastq):
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools\
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].forward_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=False,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't'}
)
# Update the JSON file
self.write_json(sample) | Use the PacBio assembly FASTA files to generate simulated reads of appropriate forward and reverse lengths
at different depths of sequencing using randomreads.sh from the bbtools suite | entailment |
def read_length_adjust(self, analysistype):
"""
Trim the reads to the correct length using reformat.sh
:param analysistype: current analysis type. Will be either 'simulated' or 'sampled'
"""
logging.info('Trimming {at} reads'.format(at=analysistype))
for sample in self.metadata:
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Create variables using the analysis type. These will be used in setting GenObject attributes
read_type = '{at}_reads'.format(at=analysistype)
fastq_type = 'trimmed_{at}_fastq'.format(at=analysistype)
logging.info(
'Trimming forward {at} reads for sample {name} at depth {depth} to length {length}'
.format(at=analysistype,
name=sample.name,
depth=depth,
length=sample[read_type][depth][read_pair].forward_reads.length))
# Create the output path if necessary
make_path(os.path.dirname(sample[read_type][depth][read_pair].forward_reads[fastq_type]))
if sample[read_type][depth][read_pair].reverse_reads.length != '0':
# Use the reformat method in the OLCTools bbtools wrapper to trim the reads
out, \
err, \
sample[read_type][depth][read_pair].forward_reads.sample_call = bbtools \
.reformat_reads(forward_in=sample[read_type][depth][read_pair].forward_reads.fastq,
reverse_in=None,
forward_out=sample[read_type][depth][read_pair].forward_reads[fastq_type],
returncmd=True,
**{'ziplevel': '9',
'forcetrimright':
sample[read_type][depth][read_pair].forward_reads.length,
'tossbrokenreads': 't',
'tossjunk': 't',
'Xmx': self.mem
}
)
# # Remove the untrimmed reads
# try:
# os.remove(sample[read_type][depth][read_pair].forward_reads.fastq)
# except FileNotFoundError:
# pass
else:
# If the files do not need to be trimmed, create a symlink to the original file
relative_symlink(sample[read_type][depth][read_pair].forward_reads.fastq,
os.path.dirname(sample[read_type][depth][read_pair].
forward_reads[fastq_type]),
os.path.basename(sample[read_type][depth][read_pair].
forward_reads[fastq_type])
)
# Same as above, but for the reverse reads
logging.info(
'Trimming reverse {at} reads for sample {name} at depth {depth} to length {length}'
.format(at=analysistype,
name=sample.name,
depth=depth,
length=sample[read_type][depth][read_pair].reverse_reads.length))
if sample[read_type][depth][read_pair].reverse_reads.length != '0':
# Use the reformat method in the OLCTools bbtools wrapper to trim the reads
out, \
err, \
sample[read_type][depth][read_pair].reverse_reads.sample_call = bbtools \
.reformat_reads(forward_in=sample[read_type][depth][read_pair].reverse_reads.fastq,
reverse_in=None,
forward_out=sample[read_type][depth][read_pair].reverse_reads[fastq_type],
returncmd=True,
**{'ziplevel': '9',
'forcetrimright':
sample[read_type][depth][read_pair].reverse_reads.length,
'tossbrokenreads': 't',
'tossjunk': 't',
'Xmx': self.mem
})
# # Remove the untrimmed reads
# try:
# os.remove(sample[read_type][depth][read_pair].reverse_reads.fastq)
# except FileNotFoundError:
# pass
# Update the JSON file
self.write_json(sample) | Trim the reads to the correct length using reformat.sh
:param analysistype: current analysis type. Will be either 'simulated' or 'sampled' | entailment |
def read_quality_trim(self):
"""
Perform quality trim, and toss reads below appropriate thresholds
"""
logging.info('Quality trim')
for sample in self.metadata:
sample.sampled_reads = GenObject()
sample.sampled_reads.outputdir = os.path.join(sample.outputdir, 'sampled')
sample.sampled_reads.trimmed_dir = os.path.join(sample.sampled_reads.outputdir, 'qualitytrimmed_reads')
make_path(sample.sampled_reads.trimmed_dir)
for depth in self.read_depths:
# Create the depth GenObject
setattr(sample.sampled_reads, depth, GenObject())
# Set the depth and output directory attributes for the depth GenObject
sample.sampled_reads[depth].depth = depth
sample.sampled_reads[depth].depth_dir = os.path.join(sample.sampled_reads.outputdir, depth)
# Create the output directory
make_path(sample.sampled_reads[depth].depth_dir)
for read_pair in self.read_lengths:
# Create the read_pair GenObject within the depth GenObject
setattr(sample.sampled_reads[depth], read_pair, GenObject())
# Set and create the output directory
sample.sampled_reads[depth][read_pair].outputdir = \
os.path.join(sample.sampled_reads[depth].depth_dir, read_pair)
make_path(sample.sampled_reads[depth][read_pair].outputdir)
# Create both forward_reads and reverse_reads sub-GenObjects
sample.sampled_reads[depth][read_pair].forward_reads = GenObject()
sample.sampled_reads[depth][read_pair].reverse_reads = GenObject()
sample.sampled_reads[depth][read_pair].trimmed_dir = \
os.path.join(sample.sampled_reads.trimmed_dir,
read_pair)
make_path(sample.sampled_reads[depth][read_pair].trimmed_dir)
# Extract the forward and reverse reads lengths from the read_pair variable
sample.sampled_reads[depth][read_pair].forward_reads.length, \
sample.sampled_reads[depth][read_pair].reverse_reads.length = read_pair.split('_')
logging.info('Performing quality trimming on reads from sample {name} at depth {depth} '
'for minimum read length {forward}'
.format(name=sample.name,
depth=depth,
forward=sample.sampled_reads[depth][read_pair].forward_reads.length))
# Set the attributes for the trimmed forward and reverse reads to use for subsampling
sample.sampled_reads[depth][read_pair].trimmed_forwardfastq = \
os.path.join(sample.sampled_reads[depth][read_pair].trimmed_dir,
'{name}_{length}_R1.fastq.gz'
.format(name=sample.name,
length=sample.sampled_reads[depth][read_pair].forward_reads.length))
sample.sampled_reads[depth][read_pair].trimmed_reversefastq = \
os.path.join(sample.sampled_reads[depth][read_pair].trimmed_dir,
'{name}_{length}_R2.fastq.gz'
.format(name=sample.name,
length=sample.sampled_reads[depth][read_pair].forward_reads.length))
# Create the trimmed output directory attribute
sample.sampled_reads[depth][read_pair].sampled_trimmed_outputdir \
= os.path.join(sample.sampled_reads[depth][read_pair].outputdir,
'sampled_trimmed')
# Set the name of the forward trimmed reads - include the depth and read length information
# This is set now, as the untrimmed files will be removed, and a check is necessary
sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_trimmed_outputdir,
'{name}_sampled_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.sampled_reads[depth][read_pair].reverse_reads.trimmed_sampled_fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_trimmed_outputdir,
'{name}_sampled_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Sample if the forward output file does not already exist
if not os.path.isfile(sample.sampled_reads[depth][read_pair].trimmed_forwardfastq) and \
not os.path.isfile(
sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq):
out, \
err, \
sample.sampled_reads[depth][read_pair].sample_cmd = \
bbtools.bbduk_trim(forward_in=sample.forward_fastq,
forward_out=sample.sampled_reads[depth][read_pair]
.trimmed_forwardfastq,
reverse_in=sample.reverse_fastq,
reverse_out=sample.sampled_reads[depth][read_pair]
.trimmed_reversefastq,
minlength=sample.sampled_reads[depth][read_pair]
.forward_reads.length,
forcetrimleft=0,
returncmd=True,
**{'ziplevel': '9',
'Xmx': self.mem})
# Update the JSON file
self.write_json(sample) | Perform quality trim, and toss reads below appropriate thresholds | entailment |
def sample_reads(self):
"""
For each PacBio assembly, sample reads from corresponding FASTQ files for appropriate forward and reverse
lengths and sequencing depths using reformat.sh from the bbtools suite
"""
logging.info('Read sampling')
for sample in self.metadata:
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Set the name of the output directory
sample.sampled_reads[depth][read_pair].sampled_outputdir \
= os.path.join(sample.sampled_reads[depth][read_pair].outputdir, 'sampled')
# Set the name of the forward reads - include the depth and read length information
sample.sampled_reads[depth][read_pair].forward_reads.fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir,
'{name}_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.sampled_reads[depth][read_pair].reverse_reads.fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir,
'{name}_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
logging.info(
'Sampling {num_reads} paired reads for sample {name} with the following parameters:\n'
'depth {dp}, forward reads {fl}bp, and reverse reads {rl}bp'
.format(num_reads=sample.simulated_reads[depth][read_pair].num_reads,
dp=depth,
name=sample.name,
fl=sample.sampled_reads[depth][read_pair].forward_reads.length,
rl=sample.sampled_reads[depth][read_pair].reverse_reads.length))
# Use the reformat method in the OLCTools bbtools wrapper
# Note that upsample=t is used to ensure that the target number of reads (samplereadstarget) is met
if not os.path.isfile(sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq):
out, \
err, \
sample.sampled_reads[depth][read_pair].sample_call = bbtools \
.reformat_reads(forward_in=sample.sampled_reads[depth][read_pair].trimmed_forwardfastq,
reverse_in=sample.sampled_reads[depth][read_pair].trimmed_reversefastq,
forward_out=sample.sampled_reads[depth][read_pair].forward_reads.fastq,
reverse_out=sample.sampled_reads[depth][read_pair].reverse_reads.fastq,
returncmd=True,
**{'samplereadstarget': sample.simulated_reads[depth][read_pair].num_reads,
'upsample': 't',
'minlength':
sample.sampled_reads[depth][read_pair].forward_reads.length,
'ziplevel': '9',
'tossbrokenreads': 't',
'tossjunk': 't',
'Xmx': self.mem
}
)
# # Remove the trimmed reads, as they are no longer necessary
# try:
# os.remove(sample.sampled_reads[depth][read_pair].trimmed_forwardfastq)
# os.remove(sample.sampled_reads[depth][read_pair].trimmed_reversefastq)
# except FileNotFoundError:
# pass
# Update the JSON file
self.write_json(sample) | For each PacBio assembly, sample reads from corresponding FASTQ files for appropriate forward and reverse
lengths and sequencing depths using reformat.sh from the bbtools suite | entailment |
def link_reads(self, analysistype):
"""
Create folders with relative symlinks to the desired simulated/sampled reads. These folders will contain all
the reads created for each sample, and will be processed with GeneSippr and COWBAT pipelines
:param analysistype: Current analysis type. Will either be 'simulated' or 'sampled'
"""
logging.info('Linking {at} reads'.format(at=analysistype))
for sample in self.metadata:
# Create the output directories
genesippr_dir = os.path.join(self.path, 'genesippr', sample.name)
sample.genesippr_dir = genesippr_dir
make_path(genesippr_dir)
cowbat_dir = os.path.join(self.path, 'cowbat', sample.name)
sample.cowbat_dir = cowbat_dir
make_path(cowbat_dir)
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Create variables using the analysis type. These will be used in setting GenObject attributes
read_type = '{at}_reads'.format(at=analysistype)
fastq_type = 'trimmed_{at}_fastq'.format(at=analysistype)
# Link reads to both output directories
for output_dir in [genesippr_dir, cowbat_dir]:
# If the original reads are shorter than the specified read length, the FASTQ files will exist,
# but will be empty. Do not create links for these files
size = os.path.getsize(sample[read_type][depth][read_pair].forward_reads[fastq_type])
if size > 20:
# Create relative symlinks to the FASTQ files - use the relative path from the desired
# output directory to the read storage path e.g.
# ../../2013-SEQ-0072/simulated/40/50_150/simulated_trimmed/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
# is the relative path to the output_dir. The link name is the base name of the reads
# joined to the desired output directory e.g.
# output_dir/2013-SEQ-0072/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
relative_symlink(sample[read_type][depth][read_pair].forward_reads[fastq_type],
output_dir)
# Original FASTQ files
relative_symlink(sample.forward_fastq,
output_dir)
relative_symlink(sample.reverse_fastq,
output_dir)
# Reverse reads
try:
size = os.path.getsize(sample[read_type][depth][read_pair].reverse_reads[fastq_type])
if size > 20:
relative_symlink(sample[read_type][depth][read_pair].reverse_reads[fastq_type],
output_dir)
except FileNotFoundError:
pass | Create folders with relative symlinks to the desired simulated/sampled reads. These folders will contain all
the reads created for each sample, and will be processed with GeneSippr and COWBAT pipelines
:param analysistype: Current analysis type. Will either be 'simulated' or 'sampled' | entailment |
def run_genesippr(self):
"""
Run GeneSippr on each of the samples
"""
from pathlib import Path
home = str(Path.home())
logging.info('GeneSippr')
# These unfortunate hard coded paths appear to be necessary
miniconda_path = os.path.join(home, 'miniconda3')
miniconda_path = miniconda_path if os.path.isdir(miniconda_path) else os.path.join(home, 'miniconda')
logging.debug(miniconda_path)
activate = 'source {mp}/bin/activate {mp}/envs/sipprverse'.format(mp=miniconda_path)
sippr_path = '{mp}/envs/sipprverse/bin/sippr.py'.format(mp=miniconda_path)
for sample in self.metadata:
logging.info(sample.name)
# Run the pipeline. Check to make sure that the serosippr report, which is created last, doesn't exist
if not os.path.isfile(os.path.join(sample.genesippr_dir, 'reports', 'genesippr.csv')):
cmd = 'python {py_path} -o {outpath} -s {seqpath} -r {refpath} -F'\
.format(py_path=sippr_path,
outpath=sample.genesippr_dir,
seqpath=sample.genesippr_dir,
refpath=self.referencefilepath
)
logging.critical(cmd)
# Create another shell script to execute within the PlasmidExtractor conda environment
template = "#!/bin/bash\n{activate} && {cmd}".format(activate=activate,
cmd=cmd)
genesippr_script = os.path.join(sample.genesippr_dir, 'run_genesippr.sh')
with open(genesippr_script, 'w+') as file:
file.write(template)
# Modify the permissions of the script to allow it to be run on the node
self.make_executable(genesippr_script)
# Run shell script
os.system('/bin/bash {}'.format(genesippr_script)) | Run GeneSippr on each of the samples | entailment |
def set_level(self, level):
"""
Set the logging level of this logger.
:param level: must be an int or a str.
"""
for handler in self.__coloredlogs_handlers:
handler.setLevel(level=level)
self.logger.setLevel(level=level) | Set the logging level of this logger.
:param level: must be an int or a str. | entailment |
def disable_logger(self, disabled=True):
"""
Disable all logging calls.
"""
# Disable standard IO streams
if disabled:
sys.stdout = _original_stdout
sys.stderr = _original_stderr
else:
sys.stdout = self.__stdout_stream
sys.stderr = self.__stderr_stream
# Disable handlers
self.logger.disabled = disabled | Disable all logging calls. | entailment |
def redirect_stdout(self, enabled=True, log_level=logging.INFO):
"""
Redirect sys.stdout to file-like object.
"""
if enabled:
if self.__stdout_wrapper:
self.__stdout_wrapper.update_log_level(log_level=log_level)
else:
self.__stdout_wrapper = StdOutWrapper(logger=self, log_level=log_level)
self.__stdout_stream = self.__stdout_wrapper
else:
self.__stdout_stream = _original_stdout
# Assign the new stream to sys.stdout
sys.stdout = self.__stdout_stream | Redirect sys.stdout to file-like object. | entailment |
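redirect_stdout assumes a StdOutWrapper file-like object that forwards writes to the logger. A minimal, simplified sketch of such a wrapper (hypothetical; the package's real wrapper also supports the custom _log/record_filter machinery shown further below):
import logging
from io import StringIO

class StdOutWrapper(object):
    # File-like object: buffer writes, emit a log record per flushed chunk
    def __init__(self, logger, log_level=logging.INFO):
        self.__logger = logger
        self.__log_level = log_level
        self.__buffer = StringIO()

    def update_log_level(self, log_level):
        self.__log_level = log_level

    def write(self, message):
        self.__buffer.write(message)
        if message.endswith('\n'):
            self.flush()

    def flush(self):
        text = self.__buffer.getvalue().strip()
        if text:
            self.__logger.log(self.__log_level, text)
        self.__buffer.truncate(0)
        self.__buffer.seek(0)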
def redirect_stderr(self, enabled=True, log_level=logging.ERROR):
"""
Redirect sys.stderr to file-like object.
"""
if enabled:
if self.__stderr_wrapper:
self.__stderr_wrapper.update_log_level(log_level=log_level)
else:
self.__stderr_wrapper = StdErrWrapper(logger=self, log_level=log_level)
self.__stderr_stream = self.__stderr_wrapper
else:
self.__stderr_stream = _original_stderr
# Assign the new stream to sys.stderr
sys.stderr = self.__stderr_stream | Redirect sys.stderr to file-like object. | entailment |
def use_file(self, enabled=True,
file_name=None,
level=logging.WARNING,
when='d',
interval=1,
backup_count=30,
delay=False,
utc=False,
at_time=None,
log_format=None,
date_format=None):
"""
Handler for logging to a file, rotating the log file at certain timed intervals.
"""
if enabled:
if not self.__file_handler:
assert file_name, 'File name is missing!'
# Create new TimedRotatingFileHandler instance
kwargs = {
'filename': file_name,
'when': when,
'interval': interval,
'backupCount': backup_count,
'encoding': 'UTF-8',
'delay': delay,
'utc': utc,
}
if sys.version_info[0] >= 3:
kwargs['atTime'] = at_time
self.__file_handler = TimedRotatingFileHandler(**kwargs)
# Use this format for default case
if not log_format:
log_format = '%(asctime)s %(name)s[%(process)d] ' \
'%(programname)s/%(module)s/%(funcName)s[%(lineno)d] ' \
'%(levelname)s %(message)s'
# Set formatter
formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
self.__file_handler.setFormatter(fmt=formatter)
# Set level for this handler
self.__file_handler.setLevel(level=level)
# Add this handler to logger
self.add_handler(hdlr=self.__file_handler)
elif self.__file_handler:
# Remove handler from logger
self.remove_handler(hdlr=self.__file_handler)
self.__file_handler = None | Handler for logging to a file, rotating the log file at certain timed intervals. | entailment |
def use_loggly(self, enabled=True,
loggly_token=None,
loggly_tag=None,
level=logging.WARNING,
log_format=None,
date_format=None):
"""
Enable handler for sending the record to Loggly service.
"""
if enabled:
if not self.__loggly_handler:
assert loggly_token, 'Loggly token is missing!'
# Use logger name for default Loggly tag
if not loggly_tag:
loggly_tag = self.name
# Create new LogglyHandler instance
self.__loggly_handler = LogglyHandler(token=loggly_token, tag=loggly_tag)
# Use this format for default case
if not log_format:
log_format = '{"name":"%(name)s","process":"%(process)d",' \
'"levelname":"%(levelname)s","time":"%(asctime)s",' \
'"filename":"%(filename)s","programname":"%(programname)s",' \
'"module":"%(module)s","funcName":"%(funcName)s",' \
'"lineno":"%(lineno)d","message":"%(message)s"}'
# Set formatter
formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
self.__loggly_handler.setFormatter(fmt=formatter)
# Set level for this handler
self.__loggly_handler.setLevel(level=level)
# Add this handler to logger
self.add_handler(hdlr=self.__loggly_handler)
elif self.__loggly_handler:
# Remove handler from logger
self.remove_handler(hdlr=self.__loggly_handler)
self.__loggly_handler = None | Enable handler for sending the record to Loggly service. | entailment |
def __find_caller(stack_info=False):
"""
Find the stack frame of the caller so that we can note the source file name,
line number and function name.
"""
frame = logging.currentframe()
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if frame:
frame = frame.f_back
caller_info = '(unknown file)', 0, '(unknown function)', None
while hasattr(frame, 'f_code'):
co = frame.f_code
if _logone_src in os.path.normcase(co.co_filename):
frame = frame.f_back
continue
tb_info = None
if stack_info:
with StringIO() as _buffer:
_buffer.write('Traceback (most recent call last):\n')
traceback.print_stack(frame, file=_buffer)
tb_info = _buffer.getvalue().strip()
caller_info = co.co_filename, frame.f_lineno, co.co_name, tb_info
break
return caller_info | Find the stack frame of the caller so that we can note the source file name,
line number and function name. | entailment |
def _log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if logging.raiseExceptions:
raise TypeError('Level must be an integer!')
else:
return
if self.logger.isEnabledFor(level=level):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
exc_info = kwargs.get('exc_info', None)
extra = kwargs.get('extra', None)
stack_info = kwargs.get('stack_info', False)
record_filter = kwargs.get('record_filter', None)
tb_info = None
if _logone_src:
# IronPython doesn't track Python frames, so findCaller raises an
# exception on some versions of IronPython. We trap it here so that
# IronPython can use logging.
try:
fn, lno, func, tb_info = self.__find_caller(stack_info=stack_info)
except ValueError: # pragma: no cover
fn, lno, func = '(unknown file)', 0, '(unknown function)'
else: # pragma: no cover
fn, lno, func = '(unknown file)', 0, '(unknown function)'
if exc_info:
if sys.version_info[0] >= 3:
if isinstance(exc_info, BaseException):
# noinspection PyUnresolvedReferences
exc_info = type(exc_info), exc_info, exc_info.__traceback__
elif not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
else:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if sys.version_info[0] >= 3:
# noinspection PyArgumentList
record = self.logger.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, tb_info)
else:
record = self.logger.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra)
if record_filter:
record = record_filter(record)
self.logger.handle(record=record) | Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1) | entailment |
def flush(self):
"""
Flush the buffer, if applicable.
"""
if self.__buffer.tell() > 0:
# Write the buffer to log
# noinspection PyProtectedMember
self.__logger._log(level=self.__log_level, msg=self.__buffer.getvalue().strip(),
record_filter=StdErrWrapper.__filter_record)
# Remove the old buffer
self.__buffer.truncate(0)
self.__buffer.seek(0) | Flush the buffer, if applicable. | entailment |
def syllabify(word):
'''Syllabify the given word, whether simplex or complex.'''
compound = bool(re.search(r'(-| |=)', word))
syllabify = _syllabify_compound if compound else _syllabify
syllabifications = list(syllabify(word))
for syll, rules in syllabifications:
yield syll, rules
n = 16 - len(syllabifications)
# yield empty syllabifications and rules
for i in range(n):
yield '', '' | Syllabify the given word, whether simplex or complex. | entailment |
def apply_T4(word):
'''An agglutination diphthong that ends in /u, y/ optionally contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa].'''
WORD = word.split('.')
PARTS = [[] for part in range(len(WORD))]
for i, v in enumerate(WORD):
# i % 2 != 0 prevents this rule from applying to first, third, etc.
# syllables, which receive stress (WSP)
if is_consonant(v[-1]) and i % 2 != 0:
if i + 1 == len(WORD) or is_consonant(WORD[i + 1][0]):
vv = u_y_final_diphthongs(v)
if vv:
I = vv.start(1) + 1
PARTS[i].append(v[:I] + '.' + v[I:])
# include original form (non-application of rule)
PARTS[i].append(v)
WORDS = [w for w in product(*PARTS)]
for WORD in WORDS:
WORD = '.'.join(WORD)
RULE = ' T4' if word != WORD else ''
yield WORD, RULE | An agglutination diphthong that ends in /u, y/ optionally contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa]. | entailment |
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name) | Setup logging. | entailment |
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record) | Uses contextstring if request_id is set, otherwise default. | entailment |
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines) | Format exception output with CONF.logging_exception_prefix. | entailment |
def _set_boutons_interface(self, buttons):
"""Display buttons given by the list of tuples (id,function,description,is_active)"""
for id_action, f, d, is_active in buttons:
icon = self.get_icon(id_action)
action = self.addAction(QIcon(icon), d)
action.setEnabled(is_active)
action.triggered.connect(f) | Display buttons given by the list of tuples (id,function,description,is_active) | entailment |
def set_interface(self, interface):
"""Add update toolbar callback to the interface"""
self.interface = interface
self.interface.callbacks.update_toolbar = self._update
self._update() | Add update toolbar callback to the interface | entailment |
def _update(self):
"""Update the display of button after querying data from interface"""
self.clear()
self._set_boutons_communs()
if self.interface:
self.addSeparator()
l_actions = self.interface.get_actions_toolbar()
self._set_boutons_interface(l_actions) | Update the display of button after querying data from interface | entailment |
def init_login(self, from_local=False):
"""Display login screen. May ask for local data loading if from_local is True."""
if self.toolbar:
self.removeToolBar(self.toolbar)
widget_login = login.Loading(self.statusBar(), self.theory_main)
self.centralWidget().addWidget(widget_login)
widget_login.loaded.connect(self.init_tabs)
widget_login.canceled.connect(self._quit)
widget_login.updated.connect(self.on_update_at_launch)
if from_local:
widget_login.propose_load_local()
else:
self.statusBar().showMessage("Données chargées depuis le serveur.", 5000) | Display login screen. May ask for local data loading if from_local is True. | entailment |
def make_response(self, status, content_type, response):
"""Shortcut for making a response to the client's request."""
headers = [('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Methods', 'GET, POST, OPTIONS'),
('Access-Control-Allow-Headers', 'Content-Type'),
('Access-Control-Max-Age', '86400'),
('Content-type', content_type)
]
self.start_response(status, headers)
return [response.encode()] | Shortcut for making a response to the client's request. | entailment |
def process_request(self, request):
"""Processes a request."""
try:
request = Request.from_json(request.read().decode())
except ValueError:
raise ClientError('Data is not valid JSON.')
except KeyError:
raise ClientError('Missing mandatory field in request object.')
except AttributeNotProvided as exc:
raise ClientError('Attribute not provided: %s.' % exc.args[0])
(start_wall_time, start_process_time) = self._get_times()
answers = self.router_class(request).answer()
self._add_times_to_answers(answers, start_wall_time, start_process_time)
answers = [x.as_dict() for x in answers]
return self.make_response('200 OK',
'application/json',
json.dumps(answers)
) | Processes a request. | entailment |
def on_post(self):
"""Extracts the request, feeds the module, and returns the response."""
request = self.environ['wsgi.input']
try:
return self.process_request(request)
except ClientError as exc:
return self.on_client_error(exc)
except BadGateway as exc:
return self.on_bad_gateway(exc)
except InvalidConfig:
raise
except Exception as exc: # pragma: no cover # pylint: disable=W0703
logging.error('Unknown exception: ', exc_info=exc)
return self.on_internal_error() | Extracts the request, feeds the module, and returns the response. | entailment |
def dispatch(self):
"""Handles dispatching of the request."""
method_name = 'on_' + self.environ['REQUEST_METHOD'].lower()
method = getattr(self, method_name, None)
if method:
return method()
else:
return self.on_bad_method() | Handles dispatching of the request. | entailment |
def sync(self):
"""Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.user.User`:
This user instance after syncing.
"""
self = self.manager.get(username=self.username)
return self | Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.user.User`:
This user instance after syncing. | entailment |
def serialised( self ):
"""Tuple containing the contents of the Block."""
klass = self.__class__
return ((klass.__module__, klass.__name__), tuple( (name, field.serialise( self._field_data[name], parent=self ) ) for name, field in klass._fields.items())) | Tuple containing the contents of the Block. | entailment |
def clone_data( self, source ):
"""Clone data from another Block.
source
Block instance to copy from.
"""
klass = self.__class__
assert isinstance( source, klass )
for name in klass._fields:
self._field_data[name] = getattr( source, name ) | Clone data from another Block.
source
Block instance to copy from. | entailment |
def import_data( self, raw_buffer ):
"""Import data from a byte array.
raw_buffer
Byte array to import from.
"""
klass = self.__class__
if raw_buffer:
assert common.is_bytes( raw_buffer )
# raw_buffer = memoryview( raw_buffer )
self._field_data = {}
for name in klass._fields:
if raw_buffer:
self._field_data[name] = klass._fields[name].get_from_buffer(
raw_buffer, parent=self
)
else:
self._field_data[name] = klass._fields[name].default
if raw_buffer:
for name, check in klass._checks.items():
check.check_buffer( raw_buffer, parent=self )
# if we have debug logging on, check the roundtrip works
if logger.isEnabledFor( logging.INFO ):
test = self.export_data()
if logger.getEffectiveLevel() <= logging.DEBUG:
logger.debug( 'Stats for {}:'.format( self ) )
logger.debug( 'Import buffer size: {}'.format( len( raw_buffer ) ) )
logger.debug( 'Export size: {}'.format( len( test ) ) )
if test == raw_buffer:
logger.debug( 'Content: exact match!' )
elif test == raw_buffer[:len( test )]:
logger.debug( 'Content: partial match!' )
else:
logger.debug( 'Content: different!' )
for x in utils.hexdump_diff_iter( raw_buffer[:len( test )], test ):
logger.debug( x )
elif test != raw_buffer[:len( test )]:
logger.info( '{} export produced changed output from import'.format( self ) )
# if raw_buffer:
# raw_buffer.release()
return | Import data from a byte array.
raw_buffer
Byte array to import from. | entailment |
def export_data( self ):
"""Export data to a byte array."""
klass = self.__class__
output = bytearray( b'\x00'*self.get_size() )
# prevalidate all data before export.
# this is important to ensure that any dependent fields
# are updated beforehand, e.g. a count referenced
# in a BlockField
queue = []
for name in klass._fields:
self.scrub_field( name )
self.validate_field( name )
self.update_deps()
for name in klass._fields:
klass._fields[name].update_buffer_with_value(
self._field_data[name], output, parent=self
)
for name, check in klass._checks.items():
check.update_buffer( output, parent=self )
return output | Export data to a byte array. | entailment |
def update_deps( self ):
"""Update dependencies on all the fields on this Block instance."""
klass = self.__class__
for name in klass._fields:
self.update_deps_on_field( name )
return | Update dependencies on all the fields on this Block instance. | entailment |
def validate( self ):
"""Validate all the fields on this Block instance."""
klass = self.__class__
for name in klass._fields:
self.validate_field( name )
return | Validate all the fields on this Block instance. | entailment |
def get_size( self ):
"""Get the projected size (in bytes) of the exported data from this Block instance."""
klass = self.__class__
size = 0
for name in klass._fields:
size = max( size, klass._fields[name].get_end_offset( self._field_data[name], parent=self ) )
for check in klass._checks.values():
size = max( size, check.get_end_offset( parent=self ) )
return size | Get the projected size (in bytes) of the exported data from this Block instance. | entailment |
def save(self, path, compressed=True, exist_ok=False):
"""
Save the GADDAG to file.
Args:
path: path to save the GADDAG to.
compressed: compress the saved GADDAG using gzip.
exist_ok: overwrite existing file at `path`.
"""
path = os.path.expandvars(os.path.expanduser(path))
if os.path.isfile(path) and not exist_ok:
raise OSError(17, os.strerror(17), path)
if os.path.isdir(path):
path = os.path.join(path, "out.gdg")
if compressed:
bytes_written = cgaddag.gdg_save_compressed(self.gdg, path.encode("ascii"))
else:
bytes_written = cgaddag.gdg_save(self.gdg, path.encode("ascii"))
if bytes_written == -1:
errno = ctypes.c_int.in_dll(ctypes.pythonapi, "errno").value
raise OSError(errno, os.strerror(errno), path)
return bytes_written | Save the GADDAG to file.
Args:
path: path to save the GADDAG to.
compressed: compress the saved GADDAG using gzip.
exist_ok: overwrite existing file at `path`. | entailment |
def load(self, path):
"""
Load a GADDAG from file, replacing the words currently in this GADDAG.
Args:
path: path to saved GADDAG to be loaded.
"""
path = os.path.expandvars(os.path.expanduser(path))
gdg = cgaddag.gdg_load(path.encode("ascii"))
if not gdg:
errno = ctypes.c_int.in_dll(ctypes.pythonapi, "errno").value
raise OSError(errno, os.strerror(errno), path)
self.__del__()
self.gdg = gdg.contents | Load a GADDAG from file, replacing the words currently in this GADDAG.
Args:
path: path to saved GADDAG to be loaded. | entailment |
def starts_with(self, prefix):
"""
Find all words starting with a prefix.
Args:
prefix: A prefix to be searched for.
Returns:
A list of all words found.
"""
prefix = prefix.lower()
found_words = []
res = cgaddag.gdg_starts_with(self.gdg, prefix.encode(encoding="ascii"))
tmp = res
while tmp:
word = tmp.contents.str.decode("ascii")
found_words.append(word)
tmp = tmp.contents.next
cgaddag.gdg_destroy_result(res)
return found_words | Find all words starting with a prefix.
Args:
prefix: A prefix to be searched for.
Returns:
A list of all words found. | entailment |
def contains(self, sub):
"""
Find all words containing a substring.
Args:
sub: A substring to be searched for.
Returns:
A list of all words found.
"""
sub = sub.lower()
found_words = set()
res = cgaddag.gdg_contains(self.gdg, sub.encode(encoding="ascii"))
tmp = res
while tmp:
word = tmp.contents.str.decode("ascii")
found_words.add(word)
tmp = tmp.contents.next
cgaddag.gdg_destroy_result(res)
return list(found_words) | Find all words containing a substring.
Args:
sub: A substring to be searched for.
Returns:
A list of all words found. | entailment |
def ends_with(self, suffix):
"""
Find all words ending with a suffix.
Args:
suffix: A suffix to be searched for.
Returns:
A list of all words found.
"""
suffix = suffix.lower()
found_words = []
res = cgaddag.gdg_ends_with(self.gdg, suffix.encode(encoding="ascii"))
tmp = res
while tmp:
word = tmp.contents.str.decode("ascii")
found_words.append(word)
tmp = tmp.contents.next
cgaddag.gdg_destroy_result(res)
return found_words | Find all words ending with a suffix.
Args:
suffix: A suffix to be searched for.
Returns:
A list of all words found. | entailment |
def add_word(self, word):
"""
Add a word to the GADDAG.
Args:
word: A word to be added to the GADDAG.
"""
word = word.lower()
if not (word.isascii() and word.isalpha()):
raise ValueError("Invalid character in word '{}'".format(word))
word = word.encode(encoding="ascii")
result = cgaddag.gdg_add_word(self.gdg, word)
if result == 1:
raise ValueError("Invalid character in word '{}'".format(word))
elif result == 2:
raise MemoryError("Out of memory, GADDAG is in an undefined state") | Add a word to the GADDAG.
Args:
word: A word to be added to the GADDAG. | entailment |
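A hedged usage sketch tying the GADDAG methods above together; the no-argument `GADDAG()` constructor is an assumption (it is not shown in this section), and the word list is illustrative.
gdg = GADDAG()  # assumed constructor
for w in ("car", "care", "bar"):
    gdg.add_word(w)

print(gdg.starts_with("ca"))   # e.g. ['car', 'care']
print(gdg.contains("ar"))      # unordered, e.g. ['car', 'care', 'bar']
print(gdg.ends_with("ar"))     # e.g. ['car', 'bar']

# save() refuses to overwrite an existing file unless exist_ok=True;
# load() replaces the current contents of the GADDAG.
gdg.save("/tmp/words.gdg", exist_ok=True)
gdg.load("/tmp/words.gdg")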
def formatLog(source="", level="", title="", data={}):
""" Similar to format, but takes additional reserved params to promote logging best-practices
:param level - severity of message - how bad is it?
:param source - application context - where did it come from?
:param title - brief description - what kind of event happened?
:param data - additional information - what details help to investigate?
"""
# consistently output empty string for unset params, because null values differ by language
source = "" if source is None else source
level = "" if level is None else level
title = "" if title is None else title
if not type(data) is dict:
data = {}
data['source'] = source
data['level'] = level
data['title'] = title
return format(data) | Similar to format, but takes additional reserved params to promote logging best-practices
:param level - severity of message - how bad is it?
:param source - application context - where did it come from?
:param title - brief description - what kind of event happened?
:param data - additional information - what details help to investigate? | entailment |
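A small usage sketch; the `format()` helper that formatLog delegates to is defined elsewhere in this module, so the exact output shape is assumed rather than shown here.
# Illustrative call only; field values are made up.
line = formatLog(
    source="billing-service",
    level="ERROR",
    title="payment declined",
    data={"order_id": 1234, "reason": "insufficient funds"},
)
print(line)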
def put(self):
"""Updates this task type on the saltant server.
Returns:
:class:`saltant.models.container_task_type.ContainerTaskType`:
A task type model instance representing the task type
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
command_to_run=self.command_to_run,
environment_variables=self.environment_variables,
required_arguments=self.required_arguments,
required_arguments_default_values=(
self.required_arguments_default_values
),
logs_path=self.logs_path,
results_path=self.results_path,
container_image=self.container_image,
container_type=self.container_type,
) | Updates this task type on the saltant server.
Returns:
:class:`saltant.models.container_task_type.ContainerTaskType`:
A task type model instance representing the task type
just updated. | entailment |
def create(
self,
name,
command_to_run,
container_image,
container_type,
description="",
logs_path="",
results_path="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
extra_data_to_post=None,
):
"""Create a container task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
container_image (str): The container name and tag. For
example, ubuntu:14.04 for Docker; and docker://ubuntu:14:04
or shub://vsoch/hello-world for Singularity.
container_type (str): The type of the container.
description (str, optional): The description of the task type.
logs_path (str, optional): The path of the logs directory
inside the container.
results_path (str, optional): The path of the results
directory inside the container.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
values for the task's required arguments.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
:class:`saltant.models.container_task_type.ContainerTaskType`:
A container task type model instance representing the
task type just created.
"""
# Add in extra data specific to container task types
if extra_data_to_post is None:
extra_data_to_post = {}
extra_data_to_post.update(
{
"container_image": container_image,
"container_type": container_type,
"logs_path": logs_path,
"results_path": results_path,
}
)
# Call the parent create function
return super(ContainerTaskTypeManager, self).create(
name=name,
command_to_run=command_to_run,
description=description,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=required_arguments_default_values,
extra_data_to_post=extra_data_to_post,
) | Create a container task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
container_image (str): The container name and tag. For
example, ubuntu:14.04 for Docker; and docker://ubuntu:14:04
or shub://vsoch/hello-world for Singularity.
container_type (str): The type of the container.
description (str, optional): The description of the task type.
logs_path (str, optional): The path of the logs directory
inside the container.
results_path (str, optional): The path of the results
directory inside the container.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
values for the task's required arguments.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
:class:`saltant.models.container_task_type.ContainerTaskType`:
A container task type model instance representing the
task type just created. | entailment |
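A hedged sketch of calling this manager method; the `client.container_task_types` attribute is an assumption about how the manager is exposed by the saltant client, and the argument values are illustrative.
# Hypothetical client wiring; only the keyword arguments mirror the
# signature documented above.
task_type = client.container_task_types.create(
    name="say-hello",
    command_to_run="bash /scripts/hello.sh",
    container_image="ubuntu:14.04",
    container_type="docker",
    required_arguments=["name"],
    required_arguments_default_values={"name": "world"},
)
print(task_type.id)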
def put(
self,
id,
name,
description,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
logs_path,
results_path,
container_image,
container_type,
extra_data_to_put=None,
):
"""Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
the task's required arguments.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
logs_path (str): The path of the logs directory inside the
container.
results_path (str): The path of the results directory inside
the container.
container_image (str): The container name and tag. For
example, ubuntu:14.04 for Docker; and docker://ubuntu:14:04
or shub://vsoch/hello-world for Singularity.
container_type (str): The type of the container.
"""
# Add in extra data specific to container task types
if extra_data_to_put is None:
extra_data_to_put = {}
extra_data_to_put.update(
{
"logs_path": logs_path,
"results_path": results_path,
"container_image": container_image,
"container_type": container_type,
}
)
# Call the parent create function
return super(ContainerTaskTypeManager, self).put(
id=id,
name=name,
description=description,
command_to_run=command_to_run,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=(
required_arguments_default_values
),
extra_data_to_put=extra_data_to_put,
) | Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
the task's required arguments.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
logs_path (str): The path of the logs directory inside the
container.
results_path (str): The path of the results directory inside
the container.
container_image (str): The container name and tag. For
example, ubuntu:14.04 for Docker; and docker://ubuntu:14:04
or shub://vsoch/hello-world for Singularity.
container_type (str): The type of the container. | entailment |
def _strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', '0' and ''. Raises ValueError if
'val' is anything else.
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0', ''):
return 0
else:
raise ValueError('Invalid truth value: {0}'.format(val)) | Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', '0' and ''. Raises ValueError if
'val' is anything else. | entailment |
def _str_to_list(value, separator):
"""Convert a string to a list with sanitization."""
value_list = [item.strip() for item in value.split(separator)]
value_list_sanitized = builtins.list(filter(None, value_list))
if len(value_list_sanitized) > 0:
return value_list_sanitized
else:
raise ValueError('Invalid list variable.') | Convert a string to a list with sanitization. | entailment |
def write(name, value):
"""Write a raw env value.
A ``None`` value clears the environment variable.
Args:
name: The environment variable name
value: The value to write
"""
if value is not None:
environ[name] = builtins.str(value)
elif environ.get(name):
del environ[name] | Write a raw env value.
A ``None`` value clears the environment variable.
Args:
name: The environment variable name
value: The value to write | entailment |
def read(name, default=None, allow_none=False, fallback=None):
"""Read the raw env value.
Read the raw environment variable or use the default. If the value is not
found and no default is set throw an exception.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
fallback: A list of fallback env variables to try and read if the primary environment
variable is unavailable.
"""
raw_value = environ.get(name)
if raw_value is None and fallback is not None:
if not isinstance(fallback, builtins.list) and not isinstance(fallback, builtins.tuple):
fallback = [fallback]
for fall in fallback:
raw_value = environ.get(fall)
if raw_value is not None:
break
if raw_value or raw_value == '':
return raw_value
elif default is not None or allow_none:
return default
else:
raise KeyError('Set the "{0}" environment variable'.format(name)) | Read the raw env value.
Read the raw environment variable or use the default. If the value is not
found and no default is set throw an exception.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
fallback: A list of fallback env variables to try and read if the primary environment
variable is unavailable. | entailment |
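A short sketch of the fallback chain and the failure mode, assuming these helpers are called from within the module itself (they shadow several builtins, so from outside they would normally be accessed through the module name):
import os

os.environ["NEW_DATABASE_URL"] = "postgres://db/app"

# The primary name is unset, so the first fallback that exists wins.
url = read("DATABASE_URL",
           fallback=["NEW_DATABASE_URL", "LEGACY_DATABASE_URL"])
assert url == "postgres://db/app"

# No value, no default and allow_none=False raises KeyError.
try:
    read("MISSING_VAR")
except KeyError:
    pass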
def str(name, default=None, allow_none=False, fallback=None):
"""Get a string based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
"""
value = read(name, default, allow_none, fallback=fallback)
if value is None and allow_none:
return None
else:
return builtins.str(value).strip() | Get a string based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional) | entailment |
def bool(name, default=None, allow_none=False, fallback=None):
"""Get a boolean based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
"""
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.bool):
return value
elif isinstance(value, builtins.int):
return True if value > 0 else False
elif value is None and allow_none:
return None
else:
value_str = builtins.str(value).lower().strip()
return _strtobool(value_str) | Get a boolean based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional) | entailment |
def int(name, default=None, allow_none=False, fallback=None):
"""Get a string environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
"""
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.str):
value = value.strip()
if value is None and allow_none:
return None
else:
return builtins.int(value) | Get an integer based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional) | entailment |
def list(name, default=None, allow_none=False, fallback=None, separator=','):
"""Get a list of strings or the default.
The individual list elements are whitespace-stripped.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
separator: The list item separator character or pattern
"""
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.list):
return value
elif isinstance(value, builtins.str):
return _str_to_list(value, separator)
elif value is None and allow_none:
return None
else:
return [builtins.str(value)] | Get a list of strings or the default.
The individual list elements are whitespace-stripped.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
separator: The list item separator character or pattern | entailment |
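A sketch of the typed readers used together, imported under an alias from outside the module (the module name `envvars` is an assumption; these functions intentionally shadow builtins, so an alias keeps the call sites readable):
import os
import envvars as env  # assumed module name

os.environ.update({
    "DEBUG": "yes",
    "PORT": " 8080 ",
    "ALLOWED_HOSTS": "localhost, example.com, ",
})

assert env.bool("DEBUG")                           # 'yes' -> truthy (1) via _strtobool
assert env.int("PORT") == 8080                     # whitespace is stripped first
assert env.list("ALLOWED_HOSTS") == ["localhost", "example.com"]  # empty items dropped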
def includeme(config):
"""this function adds some configuration for the application"""
config.add_route('references', '/references')
_add_referencer(config.registry)
config.add_view_deriver(protected_resources.protected_view)
config.add_renderer('json_item', json_renderer)
config.scan() | this function adds some configuration for the application | entailment |
def _add_referencer(registry):
"""
Gets the Referencer from config and adds it to the registry.
"""
referencer = registry.queryUtility(IReferencer)
if referencer is not None:
return referencer
ref = registry.settings['urireferencer.referencer']
url = registry.settings['urireferencer.registry_url']
r = DottedNameResolver()
registry.registerUtility(r.resolve(ref)(url), IReferencer)
return registry.queryUtility(IReferencer) | Gets the Referencer from config and adds it to the registry. | entailment |
def get_referencer(registry):
"""
Get the referencer class
:rtype: pyramid_urireferencer.referencer.AbstractReferencer
"""
# Argument might be a config or request
regis = getattr(registry, 'registry', None)
if regis is None:
regis = registry
return regis.queryUtility(IReferencer) | Get the referencer class
:rtype: pyramid_urireferencer.referencer.AbstractReferencer | entailment |
def _connect_to_ec2(region, credentials):
"""
:param region: The region of AWS to connect to.
:param EC2Credentials credentials: The credentials to use to authenticate
with EC2.
:return: a connection object to AWS EC2
"""
conn = boto.ec2.connect_to_region(
region,
aws_access_key_id=credentials.access_key_id,
aws_secret_access_key=credentials.secret_access_key
)
if conn:
return conn
else:
log_red('Failure to authenticate to EC2.')
return False | :param region: The region of AWS to connect to.
:param EC2Credentials credentials: The credentials to use to authenticate
with EC2.
:return: a connection object to AWS EC2 | entailment |
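A hedged sketch of calling this helper; `EC2Credentials` is assumed to be a simple container with `access_key_id` and `secret_access_key` attributes, as the docstring implies, and the key values are placeholders.
from collections import namedtuple

# Stand-in for the EC2Credentials type referenced in the docstring.
EC2Credentials = namedtuple("EC2Credentials",
                            ["access_key_id", "secret_access_key"])

creds = EC2Credentials("AKIA...", "...")
conn = _connect_to_ec2("us-east-1", creds)
if conn:
    print("connected to EC2 in us-east-1")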
def write(self, *args, **kwargs):
"""
:param args: tuple(value, style), tuple(value, style)
:param kwargs: header=tuple(value, style), header=tuple(value, style)
:param args: value, value
:param kwargs: header=value, header=value
"""
if args:
kwargs = dict(zip(self.header, args))
for header in kwargs:
cell = kwargs[header]
if not isinstance(cell, tuple):
cell = (cell,)
self.write_cell(self._row, self.header.index(header), *cell)
self._row += 1 | :param args: tuple(value, style), tuple(value, style)
:param kwargs: header=tuple(value, style), header=tuple(value, style)
:param args: value, value
:param kwargs: header=value, header=value | entailment |
def clear_layout(layout: QLayout) -> None:
"""Clear the layout off all its components"""
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
clear_layout(item.layout()) | Clear the layout off all its components | entailment |
def _load_hangul_syllable_types():
"""
Helper function for parsing the contents of "HangulSyllableType.txt" from the Unicode Character Database (UCD) and
generating a lookup table for determining whether or not a given Hangul syllable is of type "L", "V", "T", "LV" or
"LVT". For more info on the UCD, see the following website: https://www.unicode.org/ucd/
"""
filename = "HangulSyllableType.txt"
current_dir = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(current_dir, filename), mode="r", encoding="utf-8") as fp:
for line in fp:
if not line.strip() or line.startswith("#"):
continue # Skip empty lines or lines that are comments (comments start with '#')
data = line.strip().split(";")
syllable_type, _ = map(six.text_type.strip, data[1].split("#"))
if ".." in data[0]: # If it is a range and not a single value
start, end = map(lambda x: int(x, 16), data[0].strip().split(".."))
for idx in range(start, end + 1):
_hangul_syllable_types[idx] = syllable_type
else:
_hangul_syllable_types[int(data[0].strip(), 16)] = syllable_type | Helper function for parsing the contents of "HangulSyllableType.txt" from the Unicode Character Database (UCD) and
generating a lookup table for determining whether or not a given Hangul syllable is of type "L", "V", "T", "LV" or
"LVT". For more info on the UCD, see the following website: https://www.unicode.org/ucd/ | entailment |
def _load_jamo_short_names():
"""
Function for parsing the Jamo short names from the Unicode Character Database (UCD) and generating a lookup table
For more info on how this is used, see the Unicode Standard, ch. 03, section 3.12, "Conjoining Jamo Behavior" and
ch. 04, section 4.8, "Name".
https://www.unicode.org/versions/latest/ch03.pdf
https://www.unicode.org/versions/latest/ch04.pdf
"""
filename = "Jamo.txt"
current_dir = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(current_dir, filename), mode="r", encoding="utf-8") as fp:
for line in fp:
if not line.strip() or line.startswith("#"):
continue # Skip empty lines or lines that are comments (comments start with '#')
data = line.strip().split(";")
code = int(data[0].strip(), 16)
char_info = data[1].split("#")
short_name = char_info[0].strip()
_jamo_short_names[code] = short_name | Function for parsing the Jamo short names from the Unicode Character Database (UCD) and generating a lookup table
For more info on how this is used, see the Unicode Standard, ch. 03, section 3.12, "Conjoining Jamo Behavior" and
ch. 04, section 4.8, "Name".
https://www.unicode.org/versions/latest/ch03.pdf
https://www.unicode.org/versions/latest/ch04.pdf | entailment |
def _get_hangul_syllable_type(hangul_syllable):
"""
Function for taking a Unicode scalar value representing a Hangul syllable and determining the correct value for its
Hangul_Syllable_Type property. For more information on the Hangul_Syllable_Type property see the Unicode Standard,
ch. 03, section 3.12, Conjoining Jamo Behavior.
https://www.unicode.org/versions/latest/ch03.pdf
:param hangul_syllable: Unicode scalar value representing a Hangul syllable
:return: Returns a string representing its Hangul_Syllable_Type property ("L", "V", "T", "LV" or "LVT")
"""
if not _is_hangul_syllable(hangul_syllable):
raise ValueError("Value 0x%0.4x does not represent a Hangul syllable!" % hangul_syllable)
if not _hangul_syllable_types:
_load_hangul_syllable_types()
return _hangul_syllable_types[hangul_syllable] | Function for taking a Unicode scalar value representing a Hangul syllable and determining the correct value for its
Hangul_Syllable_Type property. For more information on the Hangul_Syllable_Type property see the Unicode Standard,
ch. 03, section 3.12, Conjoining Jamo Behavior.
https://www.unicode.org/versions/latest/ch03.pdf
:param hangul_syllable: Unicode scalar value representing a Hangul syllable
:return: Returns a string representing its Hangul_Syllable_Type property ("L", "V", "T", "LV" or "LVT") | entailment |
def _get_jamo_short_name(jamo):
"""
Function for taking a Unicode scalar value representing a Jamo and determining the correct value for its
Jamo_Short_Name property. For more information on the Jamo_Short_Name property see the Unicode Standard,
ch. 03, section 3.12, Conjoining Jamo Behavior.
https://www.unicode.org/versions/latest/ch03.pdf
:param jamo: Unicode scalar value representing a Jamo
:return: Returns a string representing its Jamo_Short_Name property
"""
if not _is_jamo(jamo):
raise ValueError("Value 0x%0.4x passed in does not represent a Jamo!" % jamo)
if not _jamo_short_names:
_load_jamo_short_names()
return _jamo_short_names[jamo] | Function for taking a Unicode scalar value representing a Jamo and determining the correct value for its
Jamo_Short_Name property. For more information on the Jamo_Short_Name property see the Unicode Standard,
ch. 03, section 3.12, Conjoining Jamo Behavior.
https://www.unicode.org/versions/latest/ch03.pdf
:param jamo: Unicode scalar value representing a Jamo
:return: Returns a string representing its Jamo_Short_Name property | entailment |
def compose_hangul_syllable(jamo):
"""
Function for taking a tuple or list of Unicode scalar values representing Jamo and composing it into a Hangul
syllable. If the values in the list or tuple passed in are not in the ranges of Jamo, a ValueError will be raised.
The algorithm for doing the composition is described in the Unicode Standard, ch. 03, section 3.12, "Conjoining Jamo
Behavior."
Example: (U+1111, U+1171) -> U+D4CC
(U+D4CC, U+11B6) -> U+D4DB
(U+1111, U+1171, U+11B6) -> U+D4DB
:param jamo: Tuple of list of Jamo to compose
:return: Composed Hangul syllable
"""
fmt_str_invalid_sequence = "{0} does not represent a valid sequence of Jamo!"
if len(jamo) == 3:
l_part, v_part, t_part = jamo
if not (l_part in range(0x1100, 0x1112 + 1) and
v_part in range(0x1161, 0x1175 + 1) and
t_part in range(0x11a8, 0x11c2 + 1)):
raise ValueError(fmt_str_invalid_sequence.format(jamo))
l_index = l_part - L_BASE
v_index = v_part - V_BASE
t_index = t_part - T_BASE
lv_index = l_index * N_COUNT + v_index * T_COUNT
return S_BASE + lv_index + t_index
elif len(jamo) == 2:
if jamo[0] in range(0x1100, 0x1112 + 1) and jamo[1] in range(0x1161, 0x1175 + 1):
l_part, v_part = jamo
l_index = l_part - L_BASE
v_index = v_part - V_BASE
lv_index = l_index * N_COUNT + v_index * T_COUNT
return S_BASE + lv_index
elif _get_hangul_syllable_type(jamo[0]) == "LV" and jamo[1] in range(0x11a8, 0x11c2 + 1):
lv_part, t_part = jamo
t_index = t_part - T_BASE
return lv_part + t_index
else:
raise ValueError(fmt_str_invalid_sequence.format(jamo))
else:
raise ValueError(fmt_str_invalid_sequence.format(jamo)) | Function for taking a tuple or list of Unicode scalar values representing Jamo and composing it into a Hangul
syllable. If the values in the list or tuple passed in are not in the ranges of Jamo, a ValueError will be raised.
The algorithm for doing the composition is described in the Unicode Standard, ch. 03, section 3.12, "Conjoining Jamo
Behavior."
Example: (U+1111, U+1171) -> U+D4CC
(U+D4CC, U+11B6) -> U+D4DB
(U+1111, U+1171, U+11B6) -> U+D4DB
:param jamo: Tuple of list of Jamo to compose
:return: Composed Hangul syllable | entailment |
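The worked examples in the docstring can be restated as assertions; this sketch assumes the L_BASE/V_BASE/T_BASE constants and the UCD data files are available as in the surrounding module.
# The three equivalent compositions of U+D4DB from the docstring.
assert compose_hangul_syllable((0x1111, 0x1171)) == 0xD4CC
assert compose_hangul_syllable((0xD4CC, 0x11B6)) == 0xD4DB
assert compose_hangul_syllable((0x1111, 0x1171, 0x11B6)) == 0xD4DB

# Values outside the Jamo ranges are rejected.
try:
    compose_hangul_syllable((0x0041, 0x1171, 0x11B6))
except ValueError:
    pass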
def decompose_hangul_syllable(hangul_syllable, fully_decompose=False):
"""
Function for taking a Unicode scalar value representing a Hangul syllable and decomposing it into a tuple
representing the scalar values of the decomposed (canonical decomposition) Jamo. If the Unicode scalar value
passed in is not in the range of Hangul syllable values (as defined in UnicodeData.txt), a ValueError will be
raised.
The algorithm for doing the decomposition is described in the Unicode Standard, ch. 03, section 3.12,
"Conjoining Jamo Behavior".
Example: U+D4DB -> (U+D4CC, U+11B6) # (canonical decomposition, default)
U+D4DB -> (U+1111, U+1171, U+11B6) # (full canonical decomposition)
:param hangul_syllable: Unicode scalar value for Hangul syllable
:param fully_decompose: Boolean indicating whether or not to do a canonical decomposition (default behavior is
fully_decompose=False) or a full canonical decomposition (fully_decompose=True)
:return: Tuple of Unicode scalar values for the decomposed Jamo.
"""
if not _is_hangul_syllable(hangul_syllable):
raise ValueError("Value passed in does not represent a Hangul syllable!")
s_index = hangul_syllable - S_BASE
if fully_decompose:
l_index = s_index // N_COUNT
v_index = (s_index % N_COUNT) // T_COUNT
t_index = s_index % T_COUNT
l_part = L_BASE + l_index
v_part = V_BASE + v_index
t_part = (T_BASE + t_index) if t_index > 0 else None
return l_part, v_part, t_part
else:
if _get_hangul_syllable_type(hangul_syllable) == "LV": # Hangul_Syllable_Type = LV
l_index = s_index // N_COUNT
v_index = (s_index % N_COUNT) // T_COUNT
l_part = L_BASE + l_index
v_part = V_BASE + v_index
return l_part, v_part
else: # Assume Hangul_Syllable_Type = LVT
lv_index = (s_index // T_COUNT) * T_COUNT
t_index = s_index % T_COUNT
lv_part = S_BASE + lv_index
t_part = T_BASE + t_index
return lv_part, t_part | Function for taking a Unicode scalar value representing a Hangul syllable and decomposing it into a tuple
representing the scalar values of the decomposed (canonical decomposition) Jamo. If the Unicode scalar value
passed in is not in the range of Hangul syllable values (as defined in UnicodeData.txt), a ValueError will be
raised.
The algorithm for doing the decomposition is described in the Unicode Standard, ch. 03, section 3.12,
"Conjoining Jamo Behavior".
Example: U+D4DB -> (U+D4CC, U+11B6) # (canonical decomposition, default)
U+D4DB -> (U+1111, U+1171, U+11B6) # (full canonical decomposition)
:param hangul_syllable: Unicode scalar value for Hangul syllable
:param fully_decompose: Boolean indicating whether or not to do a canonical decomposition (default behavior is
fully_decompose=False) or a full canonical decomposition (fully_decompose=True)
:return: Tuple of Unicode scalar values for the decomposed Jamo. | entailment |
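The inverse direction, again restating the docstring examples as assertions (the canonical-decomposition branch relies on the Hangul syllable type table loaded from the UCD file shipped with the module):
assert decompose_hangul_syllable(0xD4DB) == (0xD4CC, 0x11B6)
assert decompose_hangul_syllable(0xD4DB, fully_decompose=True) == (0x1111, 0x1171, 0x11B6)

# An LV syllable has no trailing consonant, so the full decomposition
# reports t_part as None.
assert decompose_hangul_syllable(0xD4CC, fully_decompose=True) == (0x1111, 0x1171, None)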
def _get_hangul_syllable_name(hangul_syllable):
"""
Function for taking a Unicode scalar value representing a Hangul syllable and converting it to its syllable name as
defined by the Unicode naming rule NR1. See the Unicode Standard, ch. 04, section 4.8, Names, for more information.
:param hangul_syllable: Unicode scalar value representing the Hangul syllable to convert
:return: String representing its syllable name as transformed according to naming rule NR1.
"""
if not _is_hangul_syllable(hangul_syllable):
raise ValueError("Value passed in does not represent a Hangul syllable!")
jamo = decompose_hangul_syllable(hangul_syllable, fully_decompose=True)
result = ''
for j in jamo:
if j is not None:
result += _get_jamo_short_name(j)
return result | Function for taking a Unicode scalar value representing a Hangul syllable and converting it to its syllable name as
defined by the Unicode naming rule NR1. See the Unicode Standard, ch. 04, section 4.8, Names, for more information.
:param hangul_syllable: Unicode scalar value representing the Hangul syllable to convert
:return: String representing its syllable name as transformed according to naming rule NR1. | entailment |
def generate_client_callers(spec, timeout, error_callback, local, app):
"""Return a dict mapping method names to anonymous functions that
will call the server's endpoint of the corresponding name as
described in the api defined by the swagger dict and bravado spec"""
callers_dict = {}
def mycallback(endpoint):
if not endpoint.handler_client:
return
callers_dict[endpoint.handler_client] = _generate_client_caller(spec, endpoint, timeout, error_callback, local, app)
spec.call_on_each_endpoint(mycallback)
return callers_dict | Return a dict mapping method names to anonymous functions that
will call the server's endpoint of the corresponding name as
described in the api defined by the swagger dict and bravado spec | entailment |
def _call_retry(self, force_retry):
"""Call request and retry up to max_attempts times (or none if self.max_attempts=1)"""
last_exception = None
for i in range(self.max_attempts):
try:
log.info("Calling %s %s" % (self.method, self.url))
response = self.requests_method(
self.url,
data=self.data,
params=self.params,
headers=self.headers,
timeout=(self.connect_timeout, self.read_timeout),
verify=self.verify_ssl,
)
if response is None:
log.warn("Got response None")
if self._method_is_safe_to_retry():
delay = 0.5 + i * 0.5
log.info("Waiting %s sec and Retrying since call is a %s" % (delay, self.method))
time.sleep(delay)
continue
else:
raise PyMacaronCoreException("Call %s %s returned empty response" % (self.method, self.url))
return response
except Exception as e:
last_exception = e
retry = force_retry
if isinstance(e, ReadTimeout):
# Log enough to help debugging...
log.warn("Got a ReadTimeout calling %s %s" % (self.method, self.url))
log.warn("Exception was: %s" % str(e))
resp = e.response
if not resp:
log.info("Requests error has no response.")
# TODO: retry=True? Is it really safe?
else:
b = resp.content
log.info("Requests has a response with content: " + pprint.pformat(b))
if self._method_is_safe_to_retry():
# It is safe to retry
log.info("Retrying since call is a %s" % self.method)
retry = True
elif isinstance(e, ConnectTimeout):
log.warn("Got a ConnectTimeout calling %s %s" % (self.method, self.url))
log.warn("Exception was: %s" % str(e))
# ConnectTimeouts are safe to retry whatever the call...
retry = True
if retry:
continue
else:
raise e
# max_attempts has been reached: propagate the last received Exception
if not last_exception:
last_exception = Exception("Reached max-attempts (%s). Giving up calling %s %s" % (self.max_attempts, self.method, self.url))
raise last_exception | Call request and retry up to max_attempts times (or none if self.max_attempts=1) | entailment |
def syllabify(word):
'''Syllabify the given word, whether simplex or complex.'''
word = split(word) # detect any non-delimited compounds
compound = True if re.search(r'-| |\.', word) else False
syllabify = _syllabify_compound if compound else _syllabify
syll, rules = syllabify(word)
yield syll, rules
n = 3
if 'T4' in rules:
yield syllabify(word, T4=False)
n -= 1
# yield empty syllabifications and rules
for _ in range(n):
yield '', '' | Syllabify the given word, whether simplex or complex. | entailment |
def _syllabify(word, T4=True):
'''Syllabify the given word.'''
word = replace_umlauts(word)
word, rules = apply_T1(word)
if re.search(r'[^ieAyOauo]*([ieAyOauo]{2})[^ieAyOauo]*', word):
word, T2 = apply_T2(word)
word, T8 = apply_T8(word)
word, T9 = apply_T9(word)
word, T4 = apply_T4(word) if T4 else (word, '')
rules += T2 + T8 + T9 + T4
if re.search(r'[ieAyOauo]{3}', word):
word, T6 = apply_T6(word)
word, T5 = apply_T5(word)
word, T7 = apply_T7(word)
word, T2 = apply_T2(word)
rules += T5 + T6 + T7 + T2
word = replace_umlauts(word, put_back=True)
rules = rules or ' T0' # T0 means no rules have applied
return word, rules | Syllabify the given word. | entailment |
def apply_T1(word):
'''There is a syllable boundary in front of every CV-sequence.'''
# split consonants and vowels: 'balloon' -> ['b', 'a', 'll', 'oo', 'n']
WORD = [w for w in re.split('([ieAyOauo]+)', word) if w]
count = 0
for i, v in enumerate(WORD):
if i == 0 and is_consonant(v[0]):
continue
elif is_consonant(v[0]) and i + 1 != len(WORD):
if is_cluster(v): # WSP
if count % 2 == 0:
WORD[i] = v[0] + '.' + v[1:] # CC > C.C, CCC > C.CC
else:
WORD[i] = '.' + v # CC > .CC, CCC > .CCC
# elif is_sonorant(v[0]) and is_cluster(v[1:]): # NEW
# if count % 2 == 0:
# WORD[i] = v[0:2] + '.' + v[2:]
# else:
# WORD[i] = v[0] + '.' + v[1:]
else:
WORD[i] = v[:-1] + '.' + v[-1] # CC > C.C, CCC > CC.C
count += 1
WORD = ''.join(WORD)
RULE = ' T1' if word != WORD else ''
return WORD, RULE | There is a syllable boundary in front of every CV-sequence. | entailment |
def extended_cigar(aligned_template, aligned_query):
''' Convert mutation annotations to extended cigar format
https://github.com/lh3/minimap2#the-cs-optional-tag
USAGE:
>>> template = 'CGATCGATAAATAGAGTAG---GAATAGCA'
>>> query = 'CGATCG---AATAGAGTAGGTCGAATtGCA'
>>> extended_cigar(template, query) == ':6-ata:10+gtc:4*at:3'
True
'''
# - Go through each position in the alignment
insertion = []
deletion = []
matches = []
cigar = []
for r_aa, q_aa in zip(aligned_template.lower(), aligned_query.lower()):
gap_ref = r_aa == '-'
gap_que = q_aa == '-'
match = r_aa == q_aa
if matches and not match:
# End match block
cigar.append(":%s"%len(matches))
matches = []
if insertion and not gap_ref:
# End insertion
cigar.append("+%s"%''.join(insertion))
insertion = []
elif deletion and not gap_que:
# End deletion
cigar.append("-%s"%''.join(deletion))
deletion = []
if gap_ref:
if insertion:
# Extend insertion
insertion.append(q_aa)
else:
# Start insertion
insertion = [q_aa]
elif gap_que:
if deletion:
# Extend deletion
deletion.append(r_aa)
else:
# Start deletion
deletion = [r_aa]
elif match:
if matches:
# Extend match block
matches.append(r_aa)
else:
# Start match block
matches = [r_aa]
else:
# Add SNP annotation
cigar.append("*%s%s"%(r_aa, q_aa))
if matches:
cigar.append(":%s"%len(matches))
del matches
if insertion:
# End insertion
cigar.append("+%s"%''.join(insertion))
del insertion
elif deletion:
# End deletion
cigar.append("-%s"%''.join(deletion))
del deletion
return ''.join(cigar) | Convert mutation annotations to extended cigar format
https://github.com/lh3/minimap2#the-cs-optional-tag
USAGE:
>>> template = 'CGATCGATAAATAGAGTAG---GAATAGCA'
>>> query = 'CGATCG---AATAGAGTAGGTCGAATtGCA'
>>> extended_cigar(template, query) == ':6-ata:10+gtc:4*at:3'
True | entailment |
def cigar2query(template, cigar):
''' Generate query sequence from the template and extended cigar annotation
USAGE:
>>> template = 'CGATCGATAAATAGAGTAGGAATAGCA'
>>> cigar = ':6-ata:10+gtc:4*at:3'
>>> cigar2query(template, cigar) == 'CGATCGAATAGAGTAGGTCGAATtGCA'.upper()
True
'''
query = []
entries = ['+','-','*',':']
number = list(map(str,range(10)))
cigar_length = len(cigar)
num = []
entry = None
pos = 0
i = 0
while i < cigar_length:
if cigar[i] in entries:
# New entry
if entry == ':':
old_pos = pos
pos += int(''.join(num))
query.append(template[old_pos:pos])
num = []
entry = cigar[i]
if entry == '*':
i += 2
query.append(cigar[i])
pos += 1
elif cigar[i] in number:
num.append(cigar[i])
elif entry == '-':
pos += 1
elif entry == '+':
query.append(cigar[i])
i += 1
if entry == ':':
old_pos = pos
pos += int(''.join(num))
query.append(template[old_pos:pos])
return ''.join(query).upper() | Generate query sequence from the template and extended cigar annotation
USAGE:
>>> template = 'CGATCGATAAATAGAGTAGGAATAGCA'
>>> cigar = ':6-ata:10+gtc:4*at:3'
>>> cigar2query(template, cigar) == 'CGATCGAATAGAGTAGGTCGAATtGCA'.upper()
True | entailment |
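A round-trip sketch combining the two helpers above, reusing the data from their docstrings; note that cigar2query expects the ungapped template.
template = 'CGATCGATAAATAGAGTAG---GAATAGCA'
query    = 'CGATCG---AATAGAGTAGGTCGAATtGCA'

cigar = extended_cigar(template, query)
assert cigar == ':6-ata:10+gtc:4*at:3'

# Rebuilding the query from the ungapped template and the cigar string
# gives back the original query (upper-cased, gaps removed).
assert cigar2query(template.replace('-', ''), cigar) == query.replace('-', '').upper()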
def Blaster(inputfile, databases, db_path, out_path='.', min_cov=0.6,
threshold=0.9, blast='blastn', cut_off=True):
''' BLAST wrapper method that takes a simple input and produces an overview
list of the hits to templates, and their alignments
Usage
>>> import os, subprocess, collections
>>> from Bio.Blast import NCBIXML
>>> from Bio import SeqIO
>>> from string import maketrans
>>> inputfile = 'test.fsa'
>>> databases = ['enterobacteriaceae']
>>> db_path = '/path/to/databases/plasmidfinder/'
>>> Blaster(inputfile, databases, db_path)
'''
min_cov = 100 * float(min_cov)
threshold = 100 * float(threshold)
# For alignment
gene_align_query = dict() #will contain the sequence alignment lines
gene_align_homo = dict() #will contain the sequence alignment homolog string
gene_align_sbjct = dict() #will contain the sequence alignment allele string
results = dict() #will contain the results
for db in databases:
# Adding the path to the database and output
db_file = "%s/%s.fsa"%(db_path, db)
os.system("mkdir -p %s/tmp"%(out_path))
os.system("chmod 775 %s/tmp"%(out_path))
out_file = "%s/tmp/out_%s.xml"%(out_path, db)
# Running blast
cmd = "%s -subject %s -query %s -out %s -outfmt '5' -perc_identity %s -dust 'no'"%(blast, db_file, inputfile, out_file, threshold)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
# Getting the results
result_handle = open(out_file)
blast_records = NCBIXML.parse(result_handle)
# Declaring variables for saving the results
gene_results = dict() #will contain the results for each gene
# For finding the best hits
best_hsp = dict()
# Keeping track of gene split
gene_split = collections.defaultdict(dict)
# Making the dicts for sequence outputs
gene_align_query[db] = dict()
gene_align_homo[db] = dict()
gene_align_sbjct[db] = dict()
# Parsing over the hits and only keeping the best
for blast_record in blast_records:
query = blast_record.query
blast_record.alignments.sort(key = lambda align: -max((len(hsp.query) * (int(hsp.identities)/float(len(hsp.query))) for hsp in align.hsps)))
for alignment in blast_record.alignments:
# Setting the e-value as 1 and bit as 0 to get the best HSP fragment
best_e_value = 1
best_bit = 0
for hsp in alignment.hsps:
if hsp.expect < best_e_value or hsp.bits > best_bit:
best_e_value = hsp.expect
best_bit = hsp.bits
tmp = alignment.title.split(" ")
sbjct_header = tmp[1]
bit = hsp.bits
sbjct_length = alignment.length
sbjct_start = hsp.sbjct_start
sbjct_end = hsp.sbjct_end
gaps = hsp.gaps
query_string = str(hsp.query)
homo_string = str(hsp.match)
sbjct_string = str(hsp.sbjct)
contig_name = query.replace(">","")
query_start = hsp.query_start
query_end = hsp.query_end
HSP_length = len(query_string)
perc_ident = int(hsp.identities)/float(HSP_length) * 100
strand = 0
coverage = ((int(HSP_length) - int(gaps))/float(sbjct_length))
perc_coverage = ((int(HSP_length) - int(gaps))/float(sbjct_length)) * 100
if int(HSP_length) == int(sbjct_length):
cal_score = perc_ident * coverage * 100
else:
cal_score = perc_ident * coverage
hit_id = "%s:%s..%s:%s:%f"%(contig_name, query_start, query_end, sbjct_header, cal_score)
# If the hit is on the other strand
if sbjct_start > sbjct_end:
tmp = sbjct_start
sbjct_start = sbjct_end
sbjct_end = tmp
query_string = reverse_complement(query_string)
homo_string = homo_string[::-1]
sbjct_string = reverse_complement(sbjct_string)
strand = 1
if cut_off == True:
if perc_coverage > 20 :
best_hsp = {'evalue': hsp.expect, 'sbjct_header': sbjct_header, 'bit': bit,
'perc_ident': perc_ident, 'sbjct_length':sbjct_length,
'sbjct_start': sbjct_start, 'sbjct_end': sbjct_end,
'gaps': gaps, 'query_string': query_string,
'homo_string': homo_string, 'sbjct_string': sbjct_string,
'contig_name': contig_name, 'query_start': query_start,
'query_end': query_end, 'HSP_length': HSP_length, 'coverage': coverage,
'cal_score': cal_score, 'hit_id': hit_id, 'strand': strand,
'perc_coverage': perc_coverage
}
else:
best_hsp = {'evalue': hsp.expect, 'sbjct_header': sbjct_header, 'bit': bit,
'perc_ident': perc_ident, 'sbjct_length':sbjct_length,
'sbjct_start': sbjct_start, 'sbjct_end': sbjct_end,
'gaps': gaps, 'query_string': query_string,
'homo_string': homo_string, 'sbjct_string': sbjct_string,
'contig_name': contig_name, 'query_start': query_start,
'query_end': query_end, 'HSP_length': HSP_length, 'coverage': coverage,
'cal_score': cal_score, 'hit_id': hit_id, 'strand': strand,
'perc_coverage': perc_coverage
}
# Saving the result if any
if best_hsp:
save = 1
# If there are other gene alignments they are compared
if gene_results:
tmp_gene_split = gene_split
tmp_results = gene_results
# Compare the hit results
save, gene_split, gene_results = compare_results(save, best_hsp, tmp_results, tmp_gene_split)
# If the hit is not overlapping with other hit sequences it is kept
if save == 1:
gene_results[hit_id] = best_hsp
else:
pass
# If the hit does not cover the entire database reference, the missing sequence data are extracted
for hit_id in list(gene_results):
hit = gene_results[hit_id]
# Calculate possible split gene coverage
perc_coverage = hit['perc_coverage']
if hit['sbjct_header'] in gene_split and len(gene_split[hit['sbjct_header']]) > 1:
# Calculate new length
new_length = calculate_new_length(gene_split, gene_results, hit)
hit['split_length'] = new_length
# Calculate new coverage
perc_coverage = new_length / float(hit['sbjct_length']) * 100
# If the hit is above the minimum length threshold it is kept
if perc_coverage >= min_cov:
if hit['coverage'] == 1:
gene_align_query[db][hit_id] = hit['query_string']
gene_align_homo[db][hit_id] = hit['homo_string']
gene_align_sbjct[db][hit_id] = hit['sbjct_string']
elif hit['coverage'] != 1:
# Getting the whole database sequence
for seq_record in SeqIO.parse(db_file, "fasta"):
if seq_record.description == hit['sbjct_header']:
gene_align_sbjct[db][hit_id] = str(seq_record.seq)
break
# Getting the whole contig to extract extra query sequence
contig = ''
for seq_record in SeqIO.parse(inputfile, "fasta"):
if seq_record.description == hit['contig_name']:
contig = str(seq_record.seq)
break
# Extract extra sequence from query
query_seq, homo_seq = get_query_align(hit, contig)
# Saving the new alignment sequences
gene_align_query[db][hit_id] = query_seq
gene_align_homo[db][hit_id] = homo_seq
else:
del gene_results[hit_id]
if hit['sbjct_header'] in gene_split:
del gene_split[hit['sbjct_header']]
# Save the database result
if gene_results:
results[db] = gene_results
else:
results[db] = "No hit found"
return (results, gene_align_query, gene_align_homo, gene_align_sbjct) | BLAST wrapper method that takes a simple input and produces an overview
list of the hits to templates, and their alignments
Usage
>>> import os, subprocess, collections
>>> from Bio.Blast import NCBIXML
>>> from Bio import SeqIO
>>> from string import maketrans
>>> inputfile = 'test.fsa'
>>> databases = ['enterobacteriaceae']
>>> db_path = '/path/to/databases/plasmidfinder/'
>>> Blaster(inputfile, databases, db_path) | entailment |
def compare_results(save, best_hsp, tmp_results, tmp_gene_split):
''' Function for comparing hits and saving only the best hit '''
# Get data for comparison
hit_id = best_hsp['hit_id']
new_start_query = best_hsp['query_start']
new_end_query = best_hsp['query_end']
new_start_sbjct = int(best_hsp['sbjct_start'])
new_end_sbjct = int(best_hsp['sbjct_end'])
new_score = best_hsp['cal_score']
new_db_hit = best_hsp['sbjct_header']
new_contig = best_hsp['contig_name']
new_HSP = best_hsp['HSP_length']
# See if the best HSP fragment overlaps with another alignment and keep the
# alignment with the highest score - if the new fragment is not providing new sequence
for hit in list(tmp_results):
hit_data = tmp_results[hit]
old_start_query = hit_data['query_start']
old_end_query = hit_data['query_end']
old_start_sbjct = int(hit_data['sbjct_start'])
old_end_sbjct = int(hit_data['sbjct_end'])
old_score = hit_data['cal_score']
old_db_hit = hit_data['sbjct_header']
old_contig = hit_data['contig_name']
old_HSP = hit_data['HSP_length']
remove_old = 0
# If they align to the same gene in the database they are compared
if new_db_hit == old_db_hit:
# If the hit provides additional sequence it is kept and the new coverage is saved
# otherwise the one with the highest score is kept
if new_start_sbjct < (old_start_sbjct) or new_end_sbjct > (old_end_sbjct):
# Save the hits as splitted
tmp_gene_split[old_db_hit][hit_id] = 1
if not hit in tmp_gene_split[old_db_hit]:
tmp_gene_split[old_db_hit][hit] = 1
else:
if new_score > old_score:
# Set to remove old hit
remove_old = 1
# Save a split if the new hit still creates one
if new_db_hit in tmp_gene_split and not hit_id in tmp_gene_split[new_db_hit]:
tmp_gene_split[new_db_hit][hit_id] = 1
else:
save = 0
# If the old and new hits are not identical the possible saved gene split for the new hit is removed
if hit_id != hit:
if new_db_hit in tmp_gene_split and hit_id in tmp_gene_split[new_db_hit]:
del tmp_gene_split[new_db_hit][hit_id]
break
# If the hits come from the same part of the contig sequence but match different genes only the best hit is kept
if new_contig == old_contig:
# If the two hits cover the exact same place on the contig, only
# the percentage of identity is compared
if old_start_query == new_start_query and old_end_query == new_end_query:
if best_hsp['perc_ident'] > hit_data['perc_ident']:
# Set to remove old hit
remove_old = 1
# Save a split if the new hit still creates one
if new_db_hit in tmp_gene_split and hit_id not in tmp_gene_split[new_db_hit]:
tmp_gene_split[new_db_hit][hit_id] = 1
elif best_hsp['perc_ident'] == hit_data['perc_ident']:
# Save both
# Save a split if the new hit still creates one
if new_db_hit in tmp_gene_split and hit_id not in tmp_gene_split[new_db_hit]:
tmp_gene_split[new_db_hit][hit_id] = 1
else:
save = 0
# Remove new gene from gene split if present
if new_db_hit in tmp_gene_split and hit_id in tmp_gene_split[new_db_hit]:
del tmp_gene_split[new_db_hit][hit_id]
break
elif (max(old_end_query, new_end_query) - min(old_start_query, new_start_query)) <= ((old_end_query - old_start_query) + (new_end_query - new_start_query)):
if new_score > old_score:
# Set to remove old gene
remove_old = 1
# Save a split if the new hit still creates one
if new_db_hit in tmp_gene_split and hit_id not in tmp_gene_split[new_db_hit]:
tmp_gene_split[new_db_hit][hit_id] = 1
elif new_score == old_score:
# If both genes are completely covered the longest hit is chosen
if int(best_hsp['perc_coverage']) == 100 and int(hit_data['perc_coverage']) == 100 and new_HSP > old_HSP:
# Set to remove old gene
remove_old = 1
# Save a split if the new hit creates one - both hits are saved
if new_db_hit in tmp_gene_split and hit_id not in tmp_gene_split[new_db_hit]:
tmp_gene_split[new_db_hit][hit_id] = 1
else:
# Remove new gene from gene split if present
if new_db_hit in tmp_gene_split and hit_id in tmp_gene_split[new_db_hit]:
del tmp_gene_split[new_db_hit][hit_id]
save = 0
break
# Remove old hit if new hit is better
if remove_old == 1:
del tmp_results[hit]
# Remove gene from gene split if present
if old_db_hit in tmp_gene_split and hit in tmp_gene_split[old_db_hit]:
del tmp_gene_split[old_db_hit][hit]
return save, tmp_gene_split, tmp_results | Function for comparing hits and saving only the best hit | entailment |
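The contig comparison above hinges on a span test for deciding whether two query alignments overlap. A minimal sketch of that test with hypothetical coordinates (the helper name is invented purely for illustration):
def query_hits_overlap(old_start, old_end, new_start, new_end):
    # Same test as above: two alignments overlap when the span they jointly
    # cover is no longer than the sum of their individual lengths.
    joint_span = max(old_end, new_end) - min(old_start, new_start)
    return joint_span <= (old_end - old_start) + (new_end - new_start)

print(query_hits_overlap(100, 500, 400, 900))  # True  - overlapping alignments
print(query_hits_overlap(100, 500, 600, 900))  # False - disjoint alignments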
def calculate_new_length(gene_split, gene_results, hit):
''' Function for calculating new length if the gene is split on several
contigs
'''
# Loop over the split hits and calculate the new length
first = 1
for split in gene_split[hit['sbjct_header']]:
new_start = int(gene_results[split]['sbjct_start'])
new_end = int(gene_results[split]['sbjct_end'])
# Get the first HSP
if first == 1:
new_length = int(gene_results[split]['HSP_length'])
old_start = new_start
old_end = new_end
first = 0
continue
if new_start < old_start:
new_length = new_length + (old_start - new_start)
old_start = new_start
if new_end > old_end:
new_length = new_length + (new_end - old_end)
old_end = new_end
return new_length | Function for calculating new length if the gene is split on several
contigs | entailment |
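A hedged usage sketch of calculate_new_length with made-up hit IDs and coordinates, mainly to show the dictionary shapes it expects; in the pipeline the real keys come from the BLAST parsing above.
# Hypothetical data: gene 'blaX' split across two contigs, covering
# subject positions 1-600 and 550-1000 of a 1000 bp reference gene.
gene_results = {
    'contig1:blaX': {'sbjct_start': '1', 'sbjct_end': '600', 'HSP_length': '600'},
    'contig2:blaX': {'sbjct_start': '550', 'sbjct_end': '1000', 'HSP_length': '451'},
}
gene_split = {'blaX': {'contig1:blaX': 1, 'contig2:blaX': 1}}
hit = {'sbjct_header': 'blaX'}
print(calculate_new_length(gene_split, gene_results, hit))  # 1000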
def stream_to_packet(data):
"""
Chop a stream of data into MODBUS packets.
:param data: stream of data
:returns: a tuple of the data that is a packet with the remaining
data, or ``None``
"""
if len(data) < 6:
return None
# unpack the length
pktlen = struct.unpack(">H", data[4:6])[0] + 6
if len(data) < pktlen:
return None
return (data[:pktlen], data[pktlen:]) | Chop a stream of data into MODBUS packets.
:param data: stream of data
:returns: a tuple of the data that is a packet with the remaining
data, or ``None`` | entailment |
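A hedged usage sketch for stream_to_packet; the frame bytes are made up, and `struct` is assumed to be imported in the original module. The MBAP length field at bytes 4-6 is 6, so the full frame is 12 bytes and the two trailing bytes come back as the remainder.
import struct  # used inside stream_to_packet

# Hypothetical MODBUS/TCP "read holding registers" request plus two stray bytes.
stream = bytes([0x00, 0x01, 0x00, 0x00, 0x00, 0x06,   # MBAP header, length = 6
                0x01, 0x03, 0x00, 0x00, 0x00, 0x0A,   # unit id + PDU
                0x00, 0x02])                          # start of the next frame
result = stream_to_packet(stream)
if result is not None:
    packet, remaining = result
    print(len(packet), len(remaining))  # 12 2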
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value) | Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware. | entailment |
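A hedged usage sketch for to_primitive; it assumes the module-level imports from the original file (six, timeutils, and so on) are available and only exercises the simple-type and container paths. The exact timestamp string depends on timeutils.strtime.
import datetime

# Hypothetical nested payload: dicts, lists and tuples are recursed into,
# datetimes are stringified because convert_datetime defaults to True.
payload = {
    'name': 'instance-1',
    'sizes': [1, 2, 3],
    'created': datetime.datetime(2014, 1, 1, 12, 0, 0),
    'meta': ({'nested': True},),
}
print(to_primitive(payload))
# e.g. {'name': 'instance-1', 'sizes': [1, 2, 3],
#       'created': '2014-01-01T12:00:00.000000', 'meta': [{'nested': True}]}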
def get_vowel(syll):
'''Return the first vowel in 'syll'.'''
return re.search(r'([ieaouäöy]{1})', syll, flags=FLAGS).group(1).upper() | Return the first vowel in 'syll'. | entailment |
def is_light(syll):
'''Return True if 'syll' is light.'''
return re.match(r'(^|[^ieaouäöy]+)[ieaouäöy]{1}$', syll, flags=FLAGS) | Return True if 'syll' is light. | entailment |
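stress below also calls is_heavy, which is not shown in this excerpt. A purely illustrative stand-in (the original module's definition may differ) together with the two helpers above; FLAGS is assumed to be the module's regex flags.
def is_heavy(syll):
    # Illustrative assumption: a syllable is heavy whenever it is not light,
    # e.g. it is closed by a consonant or contains more than one vowel.
    return not is_light(syll)

print(get_vowel('kis'))       # 'I'
print(bool(is_light('ka')))   # True  - open CV syllable
print(bool(is_heavy('kas')))  # True  - closed syllable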
def stress(syllabified_simplex_word):
'''Assign primary and secondary stress to 'syllabified_simplex_word'.'''
syllables = syllabified_simplex_word.split('.')
stressed = '\'' + syllables[0] # primary stress
try:
n = 0
medial = syllables[1:-1]
for i, syll in enumerate(medial):
if (i + n) % 2 == 0:
stressed += '.' + syll
else:
try:
if is_light(syll) and is_heavy(medial[i + 1]):
stressed += '.' + syll
n += 1
continue
except IndexError:
pass
# secondary stress
stressed += '.`' + syll
except IndexError:
pass
if len(syllables) > 1:
stressed += '.' + syllables[-1]
return stressed | Assign primary and secondary stress to 'syllabified_simplex_word'. | entailment |
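A hedged usage sketch for stress; the word and its syllabification are made up, and the secondary-stress placement shown assumes an is_heavy that treats open CV syllables as light.
# Primary stress (') always lands on the first syllable; secondary stress (`)
# falls on alternating medial syllables unless the light/heavy check shifts it.
print(stress('ka.las.te.le.mme'))  # "'ka.las.`te.le.mme"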