Dataset fields:

    repository_name            string, 5 to 67 chars
    func_path_in_repository    string, 4 to 234 chars
    func_name                  string, 0 to 314 chars
    whole_func_string          string, 52 to 3.87M chars
    language                   6 classes
    func_code_string           string, 52 to 3.87M chars
    func_documentation_string  string, 1 to 47.2k chars
    func_code_url              string, 85 to 339 chars
openvax/varcode
varcode/effects/mutate.py
insert_after
python
def insert_after(sequence, offset, new_residues):
    """Mutate the given sequence by inserting the string `new_residues`
    after `offset`.

    Parameters
    ----------
    sequence : sequence
        String of amino acids or DNA bases

    offset : int
        Base 0 offset from start of sequence, after which we should
        insert `new_residues`.

    new_residues : sequence
    """
    assert 0 <= offset < len(sequence), \
        "Invalid position %d for sequence of length %d" % (
            offset, len(sequence))
    prefix = sequence[:offset + 1]
    suffix = sequence[offset + 1:]
    return prefix + new_residues + suffix
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/mutate.py#L40-L60
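A minimal usage sketch, assuming varcode is installed (the import path follows the record's file path):

    from varcode.effects.mutate import insert_after

    # insert "TT" after the base at 0-based offset 1
    print(insert_after("ACGT", 1, "TT"))   # "ACTTGT"
    # insert_after("ACGT", 4, "TT")        # AssertionError: offset out of range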
openvax/varcode
varcode/effects/mutate.py
substitute
python
def substitute(sequence, offset, ref, alt):
    """Mutate a sequence by substituting `alt` in place of `ref` at the
    given `offset`.

    Parameters
    ----------
    sequence : sequence
        String of amino acids or DNA bases

    offset : int
        Base 0 offset from start of `sequence`

    ref : sequence or str
        What we expect to find at the position

    alt : sequence or str
        Alternate sequence to insert
    """
    n_ref = len(ref)
    sequence_ref = sequence[offset:offset + n_ref]
    assert str(sequence_ref) == str(ref), \
        "Reference %s at offset %d != expected reference %s" % \
        (sequence_ref, offset, ref)
    prefix = sequence[:offset]
    suffix = sequence[offset + n_ref:]
    return prefix + alt + suffix
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/mutate.py#L62-L87
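A minimal usage sketch under the same assumption that varcode is installed:

    from varcode.effects.mutate import substitute

    # replace the "CG" found at offset 1 with "T"
    print(substitute("ACGT", 1, "CG", "T"))   # "ATT"
    # substitute("ACGT", 1, "GG", "T")        # AssertionError: ref mismatch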
openvax/varcode
varcode/reference.py
_most_recent_assembly
python
def _most_recent_assembly(assembly_names):
    """
    Given a list of (matched) assembly names, return the most recent one,
    where recency is determined by sorting on the numeric element of each
    assembly name.
    """
    match_recency = [
        int(re.search(r'\d+', assembly_name).group())
        for assembly_name in assembly_names
    ]
    most_recent = [
        x for (y, x) in
        sorted(zip(match_recency, assembly_names), reverse=True)][0]
    return most_recent
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/reference.py#L45-L56
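The helper is private, but its recency rule is easy to see on hypothetical inputs (this assumes `import re` and the definition above):

    print(_most_recent_assembly(["GRCh37", "GRCh38"]))   # "GRCh38"
    print(_most_recent_assembly(["hg18", "hg19"]))       # "hg19"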
openvax/varcode
varcode/reference.py
infer_reference_name
python
def infer_reference_name(reference_name_or_path):
    """
    Given a string containing a reference name (such as a path to
    that reference's FASTA file), return its canonical name
    as used by Ensembl.
    """
    # identify all cases where reference name or path matches candidate aliases
    reference_file_name = os.path.basename(reference_name_or_path)
    matches = {'file_name': list(), 'full_path': list()}
    for assembly_name in reference_alias_dict.keys():
        candidate_list = [assembly_name] + reference_alias_dict[assembly_name]
        for candidate in candidate_list:
            if candidate.lower() in reference_file_name.lower():
                matches['file_name'].append(assembly_name)
            elif candidate.lower() in reference_name_or_path.lower():
                matches['full_path'].append(assembly_name)
    # remove duplicate matches (happens due to overlapping aliases)
    matches['file_name'] = list(set(matches['file_name']))
    matches['full_path'] = list(set(matches['full_path']))
    # given the set of existing matches, choose one to return
    # (first select based on file_name, then full path; if multiple, use
    # the most recent)
    if len(matches['file_name']) == 1:
        match = matches['file_name'][0]
    elif len(matches['file_name']) > 1:
        # separate logic for >1 vs 1 to give an informative warning
        match = _most_recent_assembly(matches['file_name'])
        warn(
            ('More than one reference ({}) matches path in header ({}); '
             'the most recent one ({}) was used.').format(
                ','.join(matches['file_name']), reference_file_name, match))
    elif len(matches['full_path']) >= 1:
        # combine full-path logic since the warning is the same
        match = _most_recent_assembly(matches['full_path'])
        warn((
            'Reference could not be matched against filename ({}); '
            'using best match against full path ({}).').format(
                reference_name_or_path, match))
    else:
        raise ValueError(
            "Failed to infer genome assembly name for %s" %
            reference_name_or_path)
    return match
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/reference.py#L59-L99
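A usage sketch, assuming "hg19" is registered as a GRCh37 alias in reference_alias_dict (the conventional mapping):

    from varcode.reference import infer_reference_name

    print(infer_reference_name("hg19"))                      # "GRCh37"
    print(infer_reference_name("/data/refs/GRCh38.fasta"))   # "GRCh38"
    # infer_reference_name("not_a_genome")   # raises ValueError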
openvax/varcode
varcode/reference.py
infer_genome
python
def infer_genome(genome_object_string_or_int):
    """
    If given an integer, return the associated human EnsemblRelease for that
    Ensembl version.

    If given a string, return the latest EnsemblRelease which has a
    reference of the same name.

    If given a PyEnsembl Genome, simply return it.
    """
    if isinstance(genome_object_string_or_int, Genome):
        return genome_object_string_or_int
    if is_integer(genome_object_string_or_int):
        return cached_release(genome_object_string_or_int)
    elif is_string(genome_object_string_or_int):
        # first infer the canonical reference name, e.g. mapping hg19 -> GRCh37,
        # and then get the associated PyEnsembl Genome object
        reference_name = infer_reference_name(genome_object_string_or_int)
        return genome_for_reference_name(reference_name)
    else:
        raise TypeError(
            ("Expected genome to be an int, string, or pyensembl.Genome "
             "instance, got %s : %s") % (
                str(genome_object_string_or_int),
                type(genome_object_string_or_int)))
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/reference.py#L102-L126
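A usage sketch, assuming pyensembl is installed alongside varcode:

    from pyensembl import EnsemblRelease
    from varcode.reference import infer_genome

    print(infer_genome(75))                 # human EnsemblRelease 75
    print(infer_genome("GRCh37"))           # latest release with a GRCh37 reference
    genome = EnsemblRelease(75)
    print(infer_genome(genome) is genome)   # True: Genome objects pass through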
openvax/varcode
varcode/variant.py
Variant.to_dict
python
def to_dict(self):
    """
    We want the original (un-normalized) field values when serializing,
    since normalization will happen in __init__.
    """
    return dict(
        contig=self.original_contig,
        start=self.original_start,
        ref=self.original_ref,
        alt=self.original_alt,
        ensembl=self.ensembl,
        allow_extended_nucleotides=self.allow_extended_nucleotides,
        normalize_contig_name=self.normalize_contig_name)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L213-L225
openvax/varcode
varcode/variant.py
Variant.short_description
python
def short_description(self):
    """
    HGVS nomenclature for genomic variants.

    More info: http://www.hgvs.org/mutnomen/
    """
    if self.is_insertion:
        return "chr%s g.%d_%dins%s" % (
            self.contig,
            self.start,
            self.start + 1,
            self.alt)
    elif self.is_deletion:
        return "chr%s g.%d_%ddel%s" % (
            self.contig,
            self.start,
            self.end,
            self.ref)
    elif self.ref == self.alt:
        return "chr%s g.%d%s" % (self.contig, self.start, self.ref)
    else:
        # substitution
        return "chr%s g.%d%s>%s" % (
            self.contig,
            self.start,
            self.ref,
            self.alt)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L268-L293
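A usage sketch, assuming pyensembl data for the chosen release is installed; `short_description` is exposed as a property on Variant, and the coordinates/alleles here are placeholders:

    from varcode import Variant

    v = Variant("7", 55259515, "T", "G", ensembl=75)
    print(v.short_description)   # "chr7 g.55259515T>G"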
openvax/varcode
varcode/variant.py
Variant.genes
python
def genes(self):
    """
    Return Gene objects for all genes which overlap this variant.
    """
    if self._genes is None:
        self._genes = self.ensembl.genes_at_locus(
            self.contig, self.start, self.end)
    return self._genes
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L322-L329
openvax/varcode
varcode/variant.py
Variant.gene_ids
python
def gene_ids(self):
    """
    Return IDs of all genes which overlap this variant. Calling
    this method is significantly cheaper than calling `Variant.genes()`,
    which has to issue many more queries to construct each Gene object.
    """
    return self.ensembl.gene_ids_at_locus(
        self.contig, self.start, self.end)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L332-L339
openvax/varcode
varcode/variant.py
Variant.gene_names
python
def gene_names(self):
    """
    Return names of all genes which overlap this variant. Calling
    this method is significantly cheaper than calling `Variant.genes()`,
    which has to issue many more queries to construct each Gene object.
    """
    return self.ensembl.gene_names_at_locus(
        self.contig, self.start, self.end)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L342-L349
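A usage sketch for the three gene accessors above. Depending on the varcode version these are exposed as plain methods or as memoized properties (the decorators are stripped in this dump); attribute access is shown here, and the locus is a placeholder inside TP53 on GRCh37:

    from varcode import Variant

    v = Variant("17", 7578406, "C", "T", ensembl=75)
    print(v.gene_names)   # e.g. ['TP53']
    print(v.gene_ids)     # e.g. ['ENSG00000141510'], cheaper than genes
    print(v.genes)        # full pyensembl Gene objects (more queries)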
openvax/varcode
varcode/variant.py
Variant.is_insertion
python
def is_insertion(self):
    """
    Does this variant represent the insertion of nucleotides into the
    reference genome?
    """
    # An insertion would appear in a VCF like C>CT, so that the
    # alternate allele starts with the reference nucleotides.
    # Since the nucleotide strings may be normalized in the constructor,
    # it's worth noting that the normalized form of this variant would be
    # ''>'T', so that 'T'.startswith('') still holds.
    return (len(self.ref) < len(self.alt)) and self.alt.startswith(self.ref)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L369-L379
openvax/varcode
varcode/variant.py
Variant.is_deletion
python
def is_deletion(self):
    """
    Does this variant represent the deletion of nucleotides from the
    reference genome?
    """
    # A deletion would appear in a VCF like CT>C, so that the
    # reference allele starts with the alternate nucleotides.
    # This is true even in the normalized case, where the alternate
    # nucleotides are an empty string.
    return (len(self.ref) > len(self.alt)) and self.ref.startswith(self.alt)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L382-L391
openvax/varcode
varcode/variant.py
Variant.is_snv
python
def is_snv(self):
    """Is the variant a single nucleotide variant?"""
    return (len(self.ref) == len(self.alt) == 1) and (self.ref != self.alt)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L399-L401
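The three predicates above reduce to plain string checks on the normalized ref/alt alleles; a self-contained illustration of that logic, without varcode itself:

    # insertion: C>CT, alt extends ref on the right
    ref, alt = "C", "CT"
    print(len(ref) < len(alt) and alt.startswith(ref))   # True

    # deletion: CT>C, ref extends alt on the right
    ref, alt = "CT", "C"
    print(len(ref) > len(alt) and ref.startswith(alt))   # True

    # SNV: single-base substitution
    ref, alt = "C", "T"
    print(len(ref) == len(alt) == 1 and ref != alt)      # True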
openvax/varcode
varcode/variant.py
Variant.is_transition
python
def is_transition(self):
    """Is this variant a pyrimidine to pyrimidine change or a purine to
    purine change?"""
    return self.is_snv and is_purine(self.ref) == is_purine(self.alt)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L404-L406
openvax/varcode
varcode/variant.py
Variant.is_transversion
python
def is_transversion(self):
    """Is this variant a pyrimidine to purine change or vice versa?"""
    return self.is_snv and is_purine(self.ref) != is_purine(self.alt)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant.py#L409-L411
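Transition vs. transversion comes down to whether ref and alt fall in the same purine/pyrimidine class. A self-contained sketch of that comparison, assuming PURINE_NUCLEOTIDES is {'A', 'G'} (the standard purines):

    PURINE_NUCLEOTIDES = {"A", "G"}

    def same_class(ref, alt):
        # True for transitions (A<->G, C<->T), False for transversions
        return (ref in PURINE_NUCLEOTIDES) == (alt in PURINE_NUCLEOTIDES)

    print(same_class("A", "G"))   # True: transition
    print(same_class("A", "C"))   # False: transversion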
openvax/varcode
varcode/effects/effect_helpers.py
variant_overlaps_interval
python
def variant_overlaps_interval(
        variant_start,
        n_ref_bases,
        interval_start,
        interval_end):
    """
    Does a variant overlap a given interval on the same chromosome?

    Parameters
    ----------
    variant_start : int
        Inclusive base-1 position of variant's starting location
        (or location before an insertion)

    n_ref_bases : int
        Number of reference bases affected by variant (used to compute
        end coordinate or determine whether variant is an insertion)

    interval_start : int
        Interval's inclusive base-1 start position

    interval_end : int
        Interval's inclusive base-1 end position
    """
    if n_ref_bases == 0:
        # insertions only overlap intervals which start before and
        # end after the insertion point; they must be fully contained
        # by the other interval
        return interval_start <= variant_start and interval_end >= variant_start
    variant_end = variant_start + n_ref_bases
    # overlap means the other interval starts before this variant ends
    # and the interval ends after this variant starts
    return interval_start <= variant_end and interval_end >= variant_start
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_helpers.py#L23-L61
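A minimal check of both branches; note that with this function's conventions a variant's end coordinate is variant_start + n_ref_bases:

    from varcode.effects.effect_helpers import variant_overlaps_interval

    # a 3-base variant starting at 10 has end coordinate 13
    print(variant_overlaps_interval(10, 3, 12, 20))   # True
    print(variant_overlaps_interval(10, 3, 14, 20))   # False

    # an insertion (n_ref_bases=0) overlaps only if its anchor point
    # falls inside the interval
    print(variant_overlaps_interval(15, 0, 10, 20))   # True
    print(variant_overlaps_interval(9, 0, 10, 20))    # False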
openvax/varcode
varcode/effects/effect_helpers.py
changes_exonic_splice_site
python
def changes_exonic_splice_site(
        transcript_offset,
        transcript,
        transcript_ref,
        transcript_alt,
        exon_start_offset,
        exon_end_offset,
        exon_number):
    """Does the given exonic mutation of a particular transcript change a
    splice site?

    Parameters
    ----------
    transcript_offset : int
        Offset from start of transcript of first reference nucleotide
        (or the last nucleotide before an insertion)

    transcript : pyensembl.Transcript

    transcript_ref : str
        Reference nucleotides

    transcript_alt : str
        Alternate nucleotides

    exon_start_offset : int
        Start offset of exon relative to beginning of transcript

    exon_end_offset : int
        End offset of exon relative to beginning of transcript

    exon_number : int
        Which exon in the order they form the transcript
    """
    # first we're going to make sure the variant doesn't disrupt the
    # splicing sequences we got from Divina et al.'s
    # "Ab initio prediction of mutation-induced cryptic
    # splice-site activation and exon skipping"
    # (http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/)
    #
    #   5' splice site: MAG|GURAGU consensus
    #       M is A or C; R is purine; | is the exon-intron boundary
    #
    #   3' splice site: YAG|R
    if exon_number > 1 and transcript_offset == exon_start_offset:
        # if this is any exon past the first, check to see if it lost
        # the purine on its left side
        #
        # the 3' splice site sequence has just a single purine on
        # the exon side
        if len(transcript_ref) > 0 and transcript_ref[0] in PURINE_NUCLEOTIDES:
            if len(transcript_alt) > 0:
                if transcript_alt[0] not in PURINE_NUCLEOTIDES:
                    return True
            else:
                # if the mutation is a deletion, are there ref nucleotides
                # afterward?
                offset_after_deletion = transcript_offset + len(transcript_ref)
                if len(transcript.sequence) > offset_after_deletion:
                    next_base = transcript.sequence[offset_after_deletion]
                    if next_base not in PURINE_NUCLEOTIDES:
                        return True
    if exon_number < len(transcript.exons):
        # if the mutation affects an exon whose right end gets spliced
        # to a next exon, check if the variant alters the exon side of
        # the 5' consensus splicing sequence:
        #
        #   MAG|GURAGU
        #       M is A or C; R is purine; | is the exon-intron boundary
        #
        # TODO: check for overlap of two intervals instead of just
        # seeing if the mutation starts inside the exonic splice site
        if variant_overlaps_interval(
                variant_start=transcript_offset,
                n_ref_bases=len(transcript_ref),
                interval_start=exon_end_offset - 2,
                interval_end=exon_end_offset):
            end_of_reference_exon = transcript.sequence[
                exon_end_offset - 2:exon_end_offset + 1]
            if matches_exon_end_pattern(end_of_reference_exon):
                # if the last three nucleotides conform to the consensus
                # sequence then treat any variant overlapping them as an
                # ExonicSpliceSite mutation
                return True
    return False
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_helpers.py#L72-L163
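The helper matches_exon_end_pattern is referenced above but not included in this record. A hypothetical sketch of what it could look like, derived only from the MAG consensus described in the comments; this is an assumption, not varcode's actual implementation:

    import re

    # exon-side 5' splice consensus: "MAG", where M is A or C
    _EXON_END_PATTERN = re.compile("[AC]AG$")

    def matches_exon_end_pattern(sequence):
        # hypothetical helper: do the last three exonic bases fit MAG?
        return _EXON_END_PATTERN.search(str(sequence).upper()) is not None

    print(matches_exon_end_pattern("TTCAG"))   # True: ends in CAG
    print(matches_exon_end_pattern("TTGAG"))   # False: G is not M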
openvax/varcode
varcode/nucleotides.py
is_purine
python
def is_purine(nucleotide, allow_extended_nucleotides=False):
    """Is the nucleotide a purine?"""
    if not allow_extended_nucleotides and nucleotide not in STANDARD_NUCLEOTIDES:
        raise ValueError(
            "{} is a non-standard nucleotide, neither purine nor pyrimidine".format(
                nucleotide))
    return nucleotide in PURINE_NUCLEOTIDES
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/nucleotides.py#L55-L60
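A usage sketch; it assumes 'N' is among the extended nucleotide codes, as the docstring's mention of non-canonical bases suggests:

    from varcode.nucleotides import is_purine

    print(is_purine("A"))    # True
    print(is_purine("C"))    # False
    # is_purine("N")         # raises ValueError: non-standard nucleotide
    print(is_purine("N", allow_extended_nucleotides=True))   # False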
openvax/varcode
varcode/nucleotides.py
normalize_nucleotide_string
python
def normalize_nucleotide_string(
        nucleotides,
        allow_extended_nucleotides=False,
        empty_chars=".-",
        treat_nan_as_empty=True):
    """
    Normalizes a nucleotide string by converting various ways of encoding
    empty strings into "", making all letters upper case, and checking to
    make sure all letters in the string are actually nucleotides.

    Parameters
    ----------
    nucleotides : str
        Sequence of nucleotides, e.g. "ACCTG"

    allow_extended_nucleotides : bool
        Allow non-canonical nucleotide characters like 'X' for unknown base

    empty_chars : str
        Characters which encode empty strings, such as "." used in VCF
        format or "-" used in MAF format

    treat_nan_as_empty : bool
        Some MAF files represent deletions/insertions with NaN ref/alt values
    """
    # check for NaN before the substring test, since `float in str`
    # raises a TypeError
    if treat_nan_as_empty and isinstance(nucleotides, float) and np.isnan(nucleotides):
        return ""
    elif nucleotides in empty_chars:
        return ""
    require_string(nucleotides, name="nucleotide string")

    nucleotides = nucleotides.upper()

    if allow_extended_nucleotides:
        valid_nucleotides = EXTENDED_NUCLEOTIDES
    else:
        valid_nucleotides = STANDARD_NUCLEOTIDES

    if not set(nucleotides) <= valid_nucleotides:
        raise ValueError(
            "Invalid character(s) in nucleotide string: %s" % (
                ",".join(set(nucleotides) - valid_nucleotides),))

    return nucleotides
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/nucleotides.py#L67-L111
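A usage sketch of the normalization behaviors the docstring describes:

    from varcode.nucleotides import normalize_nucleotide_string

    print(normalize_nucleotide_string("acct"))         # "ACCT"
    print(normalize_nucleotide_string("-"))            # "" (MAF-style empty allele)
    print(normalize_nucleotide_string("."))            # "" (VCF-style empty allele)
    print(normalize_nucleotide_string(float("nan")))   # "" (NaN from a MAF)
    # normalize_nucleotide_string("ACZT")              # raises ValueError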
openvax/varcode
varcode/vcf.py
load_vcf
python
def load_vcf(
        path,
        genome=None,
        reference_vcf_key="reference",
        only_passing=True,
        allow_extended_nucleotides=False,
        include_info=True,
        chunk_size=10 ** 5,
        max_variants=None,
        sort_key=variant_ascending_position_sort_key,
        distinct=True):
    """
    Load reference name and Variant objects from the given VCF filename.

    Local files are parsed directly; if given an HTTP URL, the file is
    first downloaded to a temporary local file.

    Parameters
    ----------
    path : str
        Path to VCF (*.vcf) or compressed VCF (*.vcf.gz).

    genome : {pyensembl.Genome, reference name, Ensembl version int}, optional
        Optionally pass in a PyEnsembl Genome object, name of reference, or
        PyEnsembl release version to specify the reference associated with
        a VCF (otherwise infer reference from VCF using reference_vcf_key)

    reference_vcf_key : str, optional
        Name of metadata field which contains path to reference FASTA
        file (default = 'reference')

    only_passing : boolean, optional
        If true, any entries whose FILTER field is not one of "." or "PASS"
        are dropped.

    allow_extended_nucleotides : boolean, default False
        Allow characters other than A,C,T,G in the ref and alt strings.

    include_info : boolean, default True
        Whether to parse the INFO and per-sample columns. If you don't need
        these, set to False for faster parsing.

    chunk_size : int, optional
        Number of records to load in memory at once.

    max_variants : int, optional
        If specified, return only the first max_variants variants.

    sort_key : fn
        Function which maps each element to a sorting criterion.
        Set to None to not sort the variants.

    distinct : boolean, default True
        Don't keep repeated variants
    """
    require_string(path, "Path or URL to VCF")
    parsed_path = parse_url_or_path(path)

    if parsed_path.scheme and parsed_path.scheme.lower() != "file":
        # pandas.read_table nominally supports HTTP, but it tends to crash on
        # large files and does not support gzip. Switching to the python-based
        # implementation of read_table (with engine="python") helps with some
        # issues but introduces a new set of problems (e.g. the dtype parameter
        # is not accepted). For these reasons, we're currently not attempting
        # to load VCFs over HTTP with pandas directly, and instead download it
        # to a temporary file and open that.
        (filename, headers) = urllib.request.urlretrieve(path)
        try:
            # The downloaded file has no file extension, which confuses pyvcf
            # for gziped files in Python 3. We rename it to have the correct
            # file extension.
            new_filename = "%s.%s" % (
                filename, parsed_path.path.split(".")[-1])
            os.rename(filename, new_filename)
            filename = new_filename
            return load_vcf(
                filename,
                genome=genome,
                reference_vcf_key=reference_vcf_key,
                only_passing=only_passing,
                allow_extended_nucleotides=allow_extended_nucleotides,
                include_info=include_info,
                chunk_size=chunk_size,
                max_variants=max_variants,
                sort_key=sort_key,
                distinct=distinct)
        finally:
            logger.info("Removing temporary file: %s", filename)
            os.unlink(filename)

    # Loading a local file.
    # The file will be opened twice: first to parse the header with pyvcf,
    # then by pandas to read the data.

    # PyVCF reads the metadata immediately and stops at the first line with
    # data. We can close the file after that.
    handle = PyVCFReaderFromPathOrURL(path)
    handle.close()
    genome = infer_genome_from_vcf(
        genome,
        handle.vcf_reader,
        reference_vcf_key)

    df_iterator = read_vcf_into_dataframe(
        path,
        include_info=include_info,
        sample_names=handle.vcf_reader.samples if include_info else None,
        chunk_size=chunk_size)

    if include_info:
        def sample_info_parser(unparsed_sample_info_strings, format_string):
            """
            Given a format string like "GT:AD:ADP:DP:FS" and a list of
            sample info strings where each entry is like
            "0/1:3,22:T=3,G=22:25:33", return a dict that maps:
            sample name -> field name -> value. Uses pyvcf to parse the
            fields.
            """
            return pyvcf_calls_to_sample_info_list(
                handle.vcf_reader._parse_samples(
                    unparsed_sample_info_strings, format_string, None))
    else:
        sample_info_parser = None

    return dataframes_to_variant_collection(
        df_iterator,
        source_path=path,
        info_parser=handle.vcf_reader._parse_info if include_info else None,
        only_passing=only_passing,
        max_variants=max_variants,
        sample_names=handle.vcf_reader.samples if include_info else None,
        sample_info_parser=sample_info_parser,
        variant_kwargs={
            'ensembl': genome,
            'allow_extended_nucleotides': allow_extended_nucleotides},
        variant_collection_kwargs={
            'sort_key': sort_key,
            'distinct': distinct})
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/vcf.py#L37-L176
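A usage sketch; the paths are placeholders:

    from varcode import load_vcf

    # local file; genome inferred from the VCF's reference header
    variants = load_vcf("/path/to/somatic.vcf")

    # pin the genome explicitly and skip INFO parsing for speed
    variants = load_vcf(
        "/path/to/somatic.vcf.gz",
        genome="GRCh38",
        include_info=False)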
openvax/varcode
varcode/vcf.py
pyvcf_calls_to_sample_info_list
python
def pyvcf_calls_to_sample_info_list(calls):
    """
    Given pyvcf.model._Call instances, return a dict mapping each sample
    name to its per-sample info:
        sample name -> field -> value
    """
    return OrderedDict(
        (call.sample, call.data._asdict()) for call in calls)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/vcf.py#L189-L196
openvax/varcode
varcode/vcf.py
dataframes_to_variant_collection
python
def dataframes_to_variant_collection(
        dataframes,
        source_path,
        info_parser=None,
        only_passing=True,
        max_variants=None,
        sample_names=None,
        sample_info_parser=None,
        variant_kwargs={},
        variant_collection_kwargs={}):
    """
    Load a VariantCollection from an iterable of pandas dataframes.

    This takes an iterable of dataframes instead of a single dataframe to
    avoid having to load huge dataframes at once into memory. If you have a
    single dataframe, just pass it in a single-element list.

    Parameters
    ----------
    dataframes
        Iterable of dataframes (e.g. a generator). Expected columns are:
            ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"]
        and 'INFO' if `info_parser` is not None. Columns must be in this
        order.

    source_path : str
        Path of VCF file from which DataFrame chunks were generated.

    info_parser : string -> object, optional
        Callable to parse INFO strings.

    only_passing : boolean, optional
        If true, any entries whose FILTER field is not one of "." or "PASS"
        are dropped.

    max_variants : int, optional
        If specified, return only the first max_variants variants.

    sample_names : list of strings, optional
        Sample names. The final columns of the dataframe should match these.
        If specified, the per-sample info columns will be parsed. You must
        also specify sample_info_parser.

    sample_info_parser : string list * string -> dict, optional
        Callable to parse per-sample info columns.

    variant_kwargs : dict, optional
        Additional keyword parameters to pass to Variant.__init__

    variant_collection_kwargs : dict, optional
        Additional keyword parameters to pass to VariantCollection.__init__
    """
    expected_columns = (
        ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"] +
        (["INFO"] if info_parser else []))

    if info_parser and sample_names:
        if sample_info_parser is None:
            raise TypeError(
                "Must specify sample_info_parser if specifying sample_names")
        expected_columns.append("FORMAT")
        expected_columns.extend(sample_names)

    variants = []
    metadata = {}
    try:
        for chunk in dataframes:
            assert chunk.columns.tolist() == expected_columns, \
                "dataframe columns (%s) do not match expected columns (%s)" % (
                    chunk.columns, expected_columns)
            for tpl in chunk.itertuples():
                (i, chrom, pos, id_, ref, alts, qual, flter) = tpl[:8]
                if flter == ".":
                    flter = None
                elif flter == "PASS":
                    flter = []
                elif only_passing:
                    continue
                else:
                    flter = flter.split(';')
                if id_ == ".":
                    id_ = None
                qual = float(qual) if qual != "." else None
                alt_num = 0
                info = sample_info = None
                for alt in alts.split(","):
                    if alt != ".":
                        if info_parser is not None and info is None:
                            info = info_parser(tpl[8])  # INFO column
                            if sample_names:
                                # Sample name -> field -> value dict.
                                sample_info = sample_info_parser(
                                    list(tpl[10:]),  # sample info columns
                                    tpl[9],  # FORMAT column
                                )
                        variant = Variant(
                            chrom,
                            int(pos),  # want a Python int not numpy.int64
                            ref,
                            alt,
                            **variant_kwargs)
                        variants.append(variant)
                        metadata[variant] = {
                            'id': id_,
                            'qual': qual,
                            'filter': flter,
                            'info': info,
                            'sample_info': sample_info,
                            'alt_allele_index': alt_num,
                        }
                        if max_variants and len(variants) > max_variants:
                            raise StopIteration
                    alt_num += 1
    except StopIteration:
        pass

    return VariantCollection(
        variants=variants,
        source_to_metadata_dict={source_path: metadata},
        **variant_collection_kwargs)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/vcf.py#L199-L321
openvax/varcode
varcode/vcf.py
read_vcf_into_dataframe
python
def read_vcf_into_dataframe(
        path,
        include_info=False,
        sample_names=None,
        chunk_size=None):
    """
    Load the data of a VCF into a pandas dataframe. All headers are ignored.

    Parameters
    ----------
    path : str
        Path to local file. HTTP and other protocols are not implemented.

    include_info : boolean, default False
        If true, the INFO field is not parsed, but is included as a string
        in the resulting data frame. If false, the INFO field is omitted.

    sample_names : string list, optional
        Sample names. The final columns of the dataframe should match these.
        If specified (and include_info is also specified), the FORMAT and
        per-sample info columns will be included in the result dataframe.

    chunk_size : int, optional
        If buffering is desired, the number of rows per chunk.

    Returns
    -------
    If chunk_size is None (the default), a dataframe with the contents of
    the VCF file. Otherwise, an iterable of dataframes, each with chunk_size
    rows.
    """
    vcf_field_types = OrderedDict()
    vcf_field_types['CHROM'] = str
    vcf_field_types['POS'] = int
    vcf_field_types['ID'] = str
    vcf_field_types['REF'] = str
    vcf_field_types['ALT'] = str
    vcf_field_types['QUAL'] = str
    vcf_field_types['FILTER'] = str
    if include_info:
        vcf_field_types['INFO'] = str
        if sample_names:
            vcf_field_types['FORMAT'] = str
            for name in sample_names:
                vcf_field_types[name] = str

    parsed_path = parse_url_or_path(path)
    if not parsed_path.scheme or parsed_path.scheme.lower() == "file":
        path = parsed_path.path
    else:
        raise NotImplementedError("Only local files are supported.")

    compression = None
    if path.endswith(".gz"):
        compression = "gzip"
    elif path.endswith(".bz2"):
        compression = "bz2"

    reader = pandas.read_table(
        path,
        compression=compression,
        comment="#",
        chunksize=chunk_size,
        dtype=vcf_field_types,
        names=list(vcf_field_types),
        usecols=range(len(vcf_field_types)))
    return reader
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/vcf.py#L324-L390
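Usage sketch (hypothetical): streaming a gzipped VCF in chunks. The path and sample names below are illustrative, and the import assumes the function is exposed by varcode.vcf, where the record above locates it.

from varcode.vcf import read_vcf_into_dataframe

# Stream the file 10,000 rows at a time; per-sample columns are only
# included when include_info is also given, per the docstring above.
for df in read_vcf_into_dataframe(
        "somatic.vcf.gz",                  # illustrative local path
        include_info=True,
        sample_names=["TUMOR", "NORMAL"],
        chunk_size=10000):
    print(df[["CHROM", "POS", "REF", "ALT"]].head())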
openvax/varcode
varcode/vcf.py
stream_gzip_decompress_lines
def stream_gzip_decompress_lines(stream): """ Uncompress a gzip stream into lines of text. Parameters ---------- stream : generator Generator of chunks of gzip compressed text. Returns ------- Generator of uncompressed lines. """ dec = zlib.decompressobj(zlib.MAX_WBITS | 16) previous = "" for compressed_chunk in stream: chunk = dec.decompress(compressed_chunk).decode() if chunk: lines = (previous + chunk).split("\n") previous = lines.pop() for line in lines: yield line yield previous
python
def stream_gzip_decompress_lines(stream): """ Uncompress a gzip stream into lines of text. Parameters ---------- stream : generator Generator of chunks of gzip compressed text. Returns ------- Generator of uncompressed lines. """ dec = zlib.decompressobj(zlib.MAX_WBITS | 16) previous = "" for compressed_chunk in stream: chunk = dec.decompress(compressed_chunk).decode() if chunk: lines = (previous + chunk).split("\n") previous = lines.pop() for line in lines: yield line yield previous
Uncompress a gzip stream into lines of text. Parameters ---------- stream : generator Generator of chunks of gzip compressed text. Returns ------- Generator of uncompressed lines.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/vcf.py#L448-L469
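A self-contained check of the line-reassembly logic. The 8-byte chunking is arbitrary; any chunk boundaries work, including ones that split a line. The import assumes the module-level helper is reachable at varcode.vcf, per the record above.

import gzip
from varcode.vcf import stream_gzip_decompress_lines

payload = gzip.compress(b"line1\nline2\nline3")
# Feed the compressed bytes in small arbitrary chunks.
chunks = (payload[i:i + 8] for i in range(0, len(payload), 8))
for line in stream_gzip_decompress_lines(chunks):
    print(line)  # prints line1, line2, line3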
openvax/varcode
varcode/vcf.py
infer_genome_from_vcf
def infer_genome_from_vcf(genome, vcf_reader, reference_vcf_key): """ Helper function to make a pyensembl.Genome instance. """ if genome: return infer_genome(genome) elif reference_vcf_key not in vcf_reader.metadata: raise ValueError("Unable to infer reference genome for %s" % ( vcf_reader.filename,)) else: reference_path = vcf_reader.metadata[reference_vcf_key] return infer_genome(reference_path)
python
def infer_genome_from_vcf(genome, vcf_reader, reference_vcf_key): """ Helper function to make a pyensembl.Genome instance. """ if genome: return infer_genome(genome) elif reference_vcf_key not in vcf_reader.metadata: raise ValueError("Unable to infer reference genome for %s" % ( vcf_reader.filename,)) else: reference_path = vcf_reader.metadata[reference_vcf_key] return infer_genome(reference_path)
Helper function to make a pyensembl.Genome instance.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/vcf.py#L472-L483
openvax/varcode
varcode/effects/effect_prediction_coding_frameshift.py
create_frameshift_effect
def create_frameshift_effect( mutated_codon_index, sequence_from_mutated_codon, variant, transcript): """ Determine frameshift effect within a coding sequence (possibly affecting either the start or stop codons, or anything in between) Parameters ---------- mutated_codon_index : int Codon offset (starting from 0 = start codon) of first non-reference amino acid in the variant protein sequence_from_mutated_codon: Bio.Seq Sequence of mutated cDNA, starting from first mutated codon, until the end of the transcript variant : Variant transcript : Transcript """ assert transcript.protein_sequence is not None, \ "Expect transcript %s to have protein sequence" % transcript original_protein_sequence = transcript.protein_sequence original_protein_length = len(original_protein_sequence) mutant_protein_suffix = translate( nucleotide_sequence=sequence_from_mutated_codon, first_codon_is_start=False, to_stop=True, truncate=True) if mutated_codon_index == 0: # TODO: scan through sequence_from_mutated_codon for # Kozak sequence + start codon to choose the new start return StartLoss(variant=variant, transcript=transcript) # the frameshifted sequence may contain some amino acids which are # the same as the original protein! _, mutant_protein_suffix, unchanged_amino_acids = trim_shared_prefix( ref=original_protein_sequence[mutated_codon_index:], alt=mutant_protein_suffix) n_unchanged_amino_acids = len(unchanged_amino_acids) offset_to_first_different_amino_acid = mutated_codon_index + n_unchanged_amino_acids # miraculously, this frameshift left the protein unchanged, # most likely by turning one stop codon into another stop codon if n_unchanged_amino_acids == 0: aa_ref = "" else: aa_ref = original_protein_sequence[-n_unchanged_amino_acids:] if offset_to_first_different_amino_acid >= original_protein_length: # frameshift is either extending the protein or leaving it unchanged if len(mutant_protein_suffix) == 0: return Silent( variant=variant, transcript=transcript, aa_pos=mutated_codon_index, aa_ref=aa_ref) else: # When all the amino acids are the same as the original, we either # have the original protein or we've extended it. # If we've extended it, it means we must have lost our stop codon. return StopLoss( variant=variant, transcript=transcript, aa_ref=aa_ref, aa_alt=mutant_protein_suffix) # original amino acid at the mutated codon before the frameshift occurred aa_ref = original_protein_sequence[offset_to_first_different_amino_acid] # TODO: what if all the shifted amino acids were the same and the protein # ended up the same length? Add a Silent case? if len(mutant_protein_suffix) == 0: # if a frameshift doesn't create any new amino acids, then # it must immediately have hit a stop codon return FrameShiftTruncation( variant=variant, transcript=transcript, stop_codon_offset=offset_to_first_different_amino_acid) return FrameShift( variant=variant, transcript=transcript, aa_mutation_start_offset=offset_to_first_different_amino_acid, shifted_sequence=str(mutant_protein_suffix))
python
def create_frameshift_effect( mutated_codon_index, sequence_from_mutated_codon, variant, transcript): """ Determine frameshift effect within a coding sequence (possibly affecting either the start or stop codons, or anything in between) Parameters ---------- mutated_codon_index : int Codon offset (starting from 0 = start codon) of first non-reference amino acid in the variant protein sequence_from_mutated_codon: Bio.Seq Sequence of mutated cDNA, starting from first mutated codon, until the end of the transcript variant : Variant transcript : Transcript """ assert transcript.protein_sequence is not None, \ "Expect transcript %s to have protein sequence" % transcript original_protein_sequence = transcript.protein_sequence original_protein_length = len(original_protein_sequence) mutant_protein_suffix = translate( nucleotide_sequence=sequence_from_mutated_codon, first_codon_is_start=False, to_stop=True, truncate=True) if mutated_codon_index == 0: # TODO: scan through sequence_from_mutated_codon for # Kozak sequence + start codon to choose the new start return StartLoss(variant=variant, transcript=transcript) # the frameshifted sequence may contain some amino acids which are # the same as the original protein! _, mutant_protein_suffix, unchanged_amino_acids = trim_shared_prefix( ref=original_protein_sequence[mutated_codon_index:], alt=mutant_protein_suffix) n_unchanged_amino_acids = len(unchanged_amino_acids) offset_to_first_different_amino_acid = mutated_codon_index + n_unchanged_amino_acids # miraculously, this frameshift left the protein unchanged, # most likely by turning one stop codon into another stop codon if n_unchanged_amino_acids == 0: aa_ref = "" else: aa_ref = original_protein_sequence[-n_unchanged_amino_acids:] if offset_to_first_different_amino_acid >= original_protein_length: # frameshift is either extending the protein or leaving it unchanged if len(mutant_protein_suffix) == 0: return Silent( variant=variant, transcript=transcript, aa_pos=mutated_codon_index, aa_ref=aa_ref) else: # When all the amino acids are the same as the original, we either # have the original protein or we've extended it. # If we've extended it, it means we must have lost our stop codon. return StopLoss( variant=variant, transcript=transcript, aa_ref=aa_ref, aa_alt=mutant_protein_suffix) # original amino acid at the mutated codon before the frameshift occurred aa_ref = original_protein_sequence[offset_to_first_different_amino_acid] # TODO: what if all the shifted amino acids were the same and the protein # ended up the same length? Add a Silent case? if len(mutant_protein_suffix) == 0: # if a frameshift doesn't create any new amino acids, then # it must immediately have hit a stop codon return FrameShiftTruncation( variant=variant, transcript=transcript, stop_codon_offset=offset_to_first_different_amino_acid) return FrameShift( variant=variant, transcript=transcript, aa_mutation_start_offset=offset_to_first_different_amino_acid, shifted_sequence=str(mutant_protein_suffix))
Determine frameshift effect within a coding sequence (possibly affecting either the start or stop codons, or anything in between) Parameters ---------- mutated_codon_index : int Codon offset (starting from 0 = start codon) of first non-reference amino acid in the variant protein sequence_from_mutated_codon: Bio.Seq Sequence of mutated cDNA, starting from first mutated codon, until the end of the transcript variant : Variant transcript : Transcript
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction_coding_frameshift.py#L35-L123
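The classification above leans on trim_shared_prefix to find the first residue that actually changes. A minimal standalone sketch of that trimming step (a simplified stand-in with the same return shape, not varcode's own helper):

def trim_shared_prefix_sketch(ref, alt):
    # Peel off leading residues that ref and alt have in common.
    n = 0
    while n < len(ref) and n < len(alt) and ref[n] == alt[n]:
        n += 1
    return ref[n:], alt[n:], ref[:n]

# Frameshift at codon 3: the first two shifted residues match by chance,
# so the first different amino acid sits at offset 3 + 2 = 5.
assert trim_shared_prefix_sketch("AYIA", "AYQL") == ("IA", "QL", "AY")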
openvax/varcode
varcode/effects/effect_prediction_coding_frameshift.py
cdna_codon_sequence_after_insertion_frameshift
def cdna_codon_sequence_after_insertion_frameshift( sequence_from_start_codon, cds_offset_before_insertion, inserted_nucleotides): """ Returns index of mutated codon and nucleotide sequence starting at the first mutated codon. """ # special logic for insertions coding_sequence_after_insertion = \ sequence_from_start_codon[cds_offset_before_insertion + 1:] if cds_offset_before_insertion % 3 == 2: # insertion happens after last nucleotide in a codon, # doesn't disrupt the existing codon from cds_offset-2 to cds_offset mutated_codon_index = cds_offset_before_insertion // 3 + 1 nucleotides_before = "" elif cds_offset_before_insertion % 3 == 1: # insertion happens after 2nd nucleotide of a codon # codon positions: # 1) cds_offset - 1 # 2) cds_offset # <----- Insertion # 3) cds_offset + 1 mutated_codon_index = cds_offset_before_insertion // 3 # the first codon in the returned sequence will contain two reference # nucleotides before the insertion nucleotides_before = sequence_from_start_codon[ cds_offset_before_insertion - 1:cds_offset_before_insertion + 1] elif cds_offset_before_insertion % 3 == 0: # insertion happens after 1st nucleotide of a codon # codon positions: # 1) cds_offset # <----- Insertion # 2) cds_offset + 1 # 3) cds_offset + 2 mutated_codon_index = cds_offset_before_insertion // 3 # the first codon in the returned sequence will contain one reference # nucleotide before the insertion nucleotides_before = sequence_from_start_codon[cds_offset_before_insertion] sequence_from_mutated_codon = ( nucleotides_before + inserted_nucleotides + coding_sequence_after_insertion) return mutated_codon_index, sequence_from_mutated_codon
python
def cdna_codon_sequence_after_insertion_frameshift( sequence_from_start_codon, cds_offset_before_insertion, inserted_nucleotides): """ Returns index of mutated codon and nucleotide sequence starting at the first mutated codon. """ # special logic for insertions coding_sequence_after_insertion = \ sequence_from_start_codon[cds_offset_before_insertion + 1:] if cds_offset_before_insertion % 3 == 2: # insertion happens after last nucleotide in a codon, # doesn't disrupt the existing codon from cds_offset-2 to cds_offset mutated_codon_index = cds_offset_before_insertion // 3 + 1 nucleotides_before = "" elif cds_offset_before_insertion % 3 == 1: # insertion happens after 2nd nucleotide of a codon # codon positions: # 1) cds_offset - 1 # 2) cds_offset # <----- Insertion # 3) cds_offset + 1 mutated_codon_index = cds_offset_before_insertion // 3 # the first codon in the returned sequence will contain two reference # nucleotides before the insertion nucleotides_before = sequence_from_start_codon[ cds_offset_before_insertion - 1:cds_offset_before_insertion + 1] elif cds_offset_before_insertion % 3 == 0: # insertion happens after 1st nucleotide of a codon # codon positions: # 1) cds_offset # <----- Insertion # 2) cds_offset + 1 # 3) cds_offset + 2 mutated_codon_index = cds_offset_before_insertion // 3 # the first codon in the returned sequence will contain one reference # nucleotide before the insertion nucleotides_before = sequence_from_start_codon[cds_offset_before_insertion] sequence_from_mutated_codon = ( nucleotides_before + inserted_nucleotides + coding_sequence_after_insertion) return mutated_codon_index, sequence_from_mutated_codon
Returns index of mutated codon and nucleotide sequence starting at the first mutated codon.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction_coding_frameshift.py#L125-L169
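Because this helper is pure string bookkeeping, its codon arithmetic is easy to spot-check. A worked example on a toy 9bp CDS (import path taken from the record above; treat the toy values as illustrative only):

from varcode.effects.effect_prediction_coding_frameshift import \
    cdna_codon_sequence_after_insertion_frameshift

# CDS "ATG GCC TTT"; insert "AA" after offset 2, i.e. between codons 0 and 1,
# so codon 1 is the first codon touched and no reference bases precede it.
idx, seq = cdna_codon_sequence_after_insertion_frameshift(
    sequence_from_start_codon="ATGGCCTTT",
    cds_offset_before_insertion=2,
    inserted_nucleotides="AA")
assert (idx, seq) == (1, "AAGCCTTT")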
openvax/varcode
varcode/effects/effect_prediction_coding_frameshift.py
cdna_codon_sequence_after_deletion_or_substitution_frameshift
def cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon, cds_offset, trimmed_cdna_ref, trimmed_cdna_alt): """ Logic for any frameshift which isn't an insertion. We have insertions as a special case since our base-inclusive indexing means something different for insertions: cds_offset = base before insertion Whereas in this case: cds_offset = first reference base affected by a variant Returns index of first modified codon and sequence from that codon onward. """ mutated_codon_index = cds_offset // 3 # get the sequence starting from the first modified codon until the end # of the transcript. sequence_after_mutated_codon = \ sequence_from_start_codon[mutated_codon_index * 3:] # the variant's ref nucleotides should start either 0, 1, or 2 nucleotides # into `sequence_after_mutated_codon` offset_into_mutated_codon = cds_offset % 3 sequence_from_mutated_codon = substitute( sequence=sequence_after_mutated_codon, offset=offset_into_mutated_codon, ref=trimmed_cdna_ref, alt=trimmed_cdna_alt) return mutated_codon_index, sequence_from_mutated_codon
python
def cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon, cds_offset, trimmed_cdna_ref, trimmed_cdna_alt): """ Logic for any frameshift which isn't an insertion. We have insertions as a special case since our base-inclusive indexing means something different for insertions: cds_offset = base before insertion Whereas in this case: cds_offset = first reference base affected by a variant Returns index of first modified codon and sequence from that codon onward. """ mutated_codon_index = cds_offset // 3 # get the sequence starting from the first modified codon until the end # of the transcript. sequence_after_mutated_codon = \ sequence_from_start_codon[mutated_codon_index * 3:] # the variant's ref nucleotides should start either 0, 1, or 2 nucleotides # into `sequence_after_mutated_codon` offset_into_mutated_codon = cds_offset % 3 sequence_from_mutated_codon = substitute( sequence=sequence_after_mutated_codon, offset=offset_into_mutated_codon, ref=trimmed_cdna_ref, alt=trimmed_cdna_alt) return mutated_codon_index, sequence_from_mutated_codon
Logic for any frameshift which isn't an insertion. We have insertions as a special case since our base-inclusive indexing means something different for insertions: cds_offset = base before insertion Whereas in this case: cds_offset = first reference base affected by a variant Returns index of first modified codon and sequence from that codon onward.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction_coding_frameshift.py#L172-L204
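A matching worked example for the deletion case, on the same toy CDS and under the same import assumption as above:

from varcode.effects.effect_prediction_coding_frameshift import \
    cdna_codon_sequence_after_deletion_or_substitution_frameshift

# Delete the C at CDS offset 4 of "ATG GCC TTT": codon 1 is the first
# modified codon, and the returned sequence starts from that codon.
idx, seq = cdna_codon_sequence_after_deletion_or_substitution_frameshift(
    sequence_from_start_codon="ATGGCCTTT",
    cds_offset=4,
    trimmed_cdna_ref="C",
    trimmed_cdna_alt="")
assert (idx, seq) == (1, "GCTTT")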
openvax/varcode
varcode/effects/effect_prediction_coding_frameshift.py
predict_frameshift_coding_effect
def predict_frameshift_coding_effect( variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, cds_offset, sequence_from_start_codon): """ Coding effect of a frameshift mutation. Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : nucleotide sequence Reference nucleotides in the coding sequence of the given transcript. trimmed_cdna_alt : nucleotide sequence Alternate nucleotides introduced by mutation cds_offset : int Offset into the CDS of first ref nucleotide. For insertions, this is the offset of the last ref nucleotide before the insertion. sequence_from_start_codon : nucleotide sequence Nucleotides of the coding sequence and 3' UTR """ if len(trimmed_cdna_ref) != 0: mutated_codon_index, sequence_from_mutated_codon = \ cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset=cds_offset, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt) else: mutated_codon_index, sequence_from_mutated_codon = \ cdna_codon_sequence_after_insertion_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset_before_insertion=cds_offset, inserted_nucleotides=trimmed_cdna_alt) return create_frameshift_effect( mutated_codon_index=mutated_codon_index, sequence_from_mutated_codon=sequence_from_mutated_codon, variant=variant, transcript=transcript)
python
def predict_frameshift_coding_effect( variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, cds_offset, sequence_from_start_codon): """ Coding effect of a frameshift mutation. Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : nucleotide sequence Reference nucleotides in the coding sequence of the given transcript. trimmed_cdna_alt : nucleotide sequence Alternate nucleotides introduced by mutation cds_offset : int Offset into the CDS of first ref nucleotide. For insertions, this is the offset of the last ref nucleotide before the insertion. sequence_from_start_codon : nucleotide sequence Nucleotides of the coding sequence and 3' UTR """ if len(trimmed_cdna_ref) != 0: mutated_codon_index, sequence_from_mutated_codon = \ cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset=cds_offset, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt) else: mutated_codon_index, sequence_from_mutated_codon = \ cdna_codon_sequence_after_insertion_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset_before_insertion=cds_offset, inserted_nucleotides=trimmed_cdna_alt) return create_frameshift_effect( mutated_codon_index=mutated_codon_index, sequence_from_mutated_codon=sequence_from_mutated_codon, variant=variant, transcript=transcript)
Coding effect of a frameshift mutation. Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : nucleotide sequence Reference nucleotides in the coding sequence of the given transcript. trimmed_cdna_alt : nucleotide sequence Alternate nucleotides introduced by mutation cds_offset : int Offset into the CDS of first ref nucleotide. For insertions, this is the offset of the last ref nucleotide before the insertion. sequence_from_start_codon : nucleotide sequence Nucleotides of the coding sequence and 3' UTR
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction_coding_frameshift.py#L207-L254
openvax/varcode
varcode/effects/effect_prediction_coding.py
predict_variant_coding_effect_on_transcript
def predict_variant_coding_effect_on_transcript( variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, transcript_offset): """ Given a minimal cDNA ref/alt nucleotide string pair and an offset into a given transcript, determine the coding effect of this nucleotide substitution onto the translated protein. Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : str Reference nucleotides we expect to find in the transcript's CDS trimmed_cdna_alt : str Alternate nucleotides we're replacing the reference with transcript_offset : int Offset into the full transcript sequence of the ref->alt substitution """ if not transcript.complete: raise ValueError( ("Can't annotate coding effect for %s" " on incomplete transcript %s" % (variant, transcript))) sequence = transcript.sequence n_ref = len(trimmed_cdna_ref) n_alt = len(trimmed_cdna_alt) # reference nucleotides found on the transcript, if these don't match # what we were told to expect from the variant then raise an exception ref_nucleotides_from_transcript = str( sequence[transcript_offset:transcript_offset + n_ref]) # Make sure that the reference sequence agrees with what we expected # from the VCF assert ref_nucleotides_from_transcript == trimmed_cdna_ref, \ "%s: expected ref '%s' at offset %d of %s, transcript has '%s'" % ( variant, trimmed_cdna_ref, transcript_offset, transcript, ref_nucleotides_from_transcript) start_codon_offset = transcript.first_start_codon_spliced_offset stop_codon_offset = transcript.last_stop_codon_spliced_offset cds_len = stop_codon_offset - start_codon_offset + 1 if cds_len < 3: raise ValueError( "Coding sequence for %s is too short: '%s'" % ( transcript, transcript.sequence[start_codon_offset:stop_codon_offset + 1])) if n_ref == 0 and transcript.strand == "-": # By convention, genomic insertions happen *after* their base 1 position on # a chromosome. On the reverse strand, however, an insertion has to go # before the nucleotide at some transcript offset. # Example: # chromosome sequence: # TTT|GATCTCGTA|CCC # transcript on reverse strand: # CCC|ATGCTCTAG|TTT # where the CDS is emphasized: # ATGCTCTAG # If we have a genomic insertion g.6insATT # the genomic sequence becomes: # TTT|GAT_ATT_CTCGTA|CCC # (insert the "ATT" after the "T" at position 6) # On the reverse strand this becomes: # CCC|ATGCTC_TTA_TAG|TTT # (insert the "ATT" *before* the "T" at position 10) # # To preserve the interpretation of the start offset as the base # before the insertion, need to subtract one cds_offset = transcript_offset - start_codon_offset - 1 else: cds_offset = transcript_offset - start_codon_offset assert cds_offset < cds_len, \ "Expected CDS offset (%d) < |CDS| (%d) for %s on %s" % ( cds_offset, cds_len, variant, transcript) sequence_from_start_codon = str(sequence[start_codon_offset:]) # is this an in-frame mutation? if (n_ref - n_alt) % 3 == 0: return predict_in_frame_coding_effect( variant=variant, transcript=transcript, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt, cds_offset=cds_offset, sequence_from_start_codon=sequence_from_start_codon) else: return predict_frameshift_coding_effect( variant=variant, transcript=transcript, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt, cds_offset=cds_offset, sequence_from_start_codon=sequence_from_start_codon)
python
def predict_variant_coding_effect_on_transcript( variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, transcript_offset): """ Given a minimal cDNA ref/alt nucleotide string pair and an offset into a given transcript, determine the coding effect of this nucleotide substitution onto the translated protein. Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : str Reference nucleotides we expect to find in the transcript's CDS trimmed_cdna_alt : str Alternate nucleotides we're replacing the reference with transcript_offset : int Offset into the full transcript sequence of the ref->alt substitution """ if not transcript.complete: raise ValueError( ("Can't annotate coding effect for %s" " on incomplete transcript %s" % (variant, transcript))) sequence = transcript.sequence n_ref = len(trimmed_cdna_ref) n_alt = len(trimmed_cdna_alt) # reference nucleotides found on the transcript, if these don't match # what we were told to expect from the variant then raise an exception ref_nucleotides_from_transcript = str( sequence[transcript_offset:transcript_offset + n_ref]) # Make sure that the reference sequence agrees with what we expected # from the VCF assert ref_nucleotides_from_transcript == trimmed_cdna_ref, \ "%s: expected ref '%s' at offset %d of %s, transcript has '%s'" % ( variant, trimmed_cdna_ref, transcript_offset, transcript, ref_nucleotides_from_transcript) start_codon_offset = transcript.first_start_codon_spliced_offset stop_codon_offset = transcript.last_stop_codon_spliced_offset cds_len = stop_codon_offset - start_codon_offset + 1 if cds_len < 3: raise ValueError( "Coding sequence for %s is too short: '%s'" % ( transcript, transcript.sequence[start_codon_offset:stop_codon_offset + 1])) if n_ref == 0 and transcript.strand == "-": # By convention, genomic insertions happen *after* their base 1 position on # a chromosome. On the reverse strand, however, an insertion has to go # before the nucleotide at some transcript offset. # Example: # chromosome sequence: # TTT|GATCTCGTA|CCC # transcript on reverse strand: # CCC|ATGCTCTAG|TTT # where the CDS is emphasized: # ATGCTCTAG # If we have a genomic insertion g.6insATT # the genomic sequence becomes: # TTT|GAT_ATT_CTCGTA|CCC # (insert the "ATT" after the "T" at position 6) # On the reverse strand this becomes: # CCC|ATGCTC_TTA_TAG|TTT # (insert the "ATT" *before* the "T" at position 10) # # To preserve the interpretation of the start offset as the base # before the insertion, need to subtract one cds_offset = transcript_offset - start_codon_offset - 1 else: cds_offset = transcript_offset - start_codon_offset assert cds_offset < cds_len, \ "Expected CDS offset (%d) < |CDS| (%d) for %s on %s" % ( cds_offset, cds_len, variant, transcript) sequence_from_start_codon = str(sequence[start_codon_offset:]) # is this an in-frame mutation? if (n_ref - n_alt) % 3 == 0: return predict_in_frame_coding_effect( variant=variant, transcript=transcript, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt, cds_offset=cds_offset, sequence_from_start_codon=sequence_from_start_codon) else: return predict_frameshift_coding_effect( variant=variant, transcript=transcript, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt, cds_offset=cds_offset, sequence_from_start_codon=sequence_from_start_codon)
Given a minimal cDNA ref/alt nucleotide string pair and an offset into a given transcript, determine the coding effect of this nucleotide substitution onto the translated protein. Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : str Reference nucleotides we expect to find in the transcript's CDS trimmed_cdna_alt : str Alternate nucleotides we're replacing the reference with transcript_offset : int Offset into the full transcript sequence of the ref->alt substitution
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction_coding.py#L21-L130
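The final dispatch reduces to modular arithmetic on the trimmed allele lengths; restated in isolation:

def is_in_frame(trimmed_cdna_ref, trimmed_cdna_alt):
    # A net length change divisible by 3 preserves the reading frame.
    return (len(trimmed_cdna_ref) - len(trimmed_cdna_alt)) % 3 == 0

assert is_in_frame("A", "G")      # substitution
assert is_in_frame("", "GAT")     # insertion of one whole codon
assert not is_in_frame("AT", "")  # 2bp deletion shifts the frame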
openvax/varcode
varcode/effects/effect_prediction.py
predict_variant_effects
def predict_variant_effects(variant, raise_on_error=False): """Determine the effects of a variant on any transcripts it overlaps. Returns an EffectCollection object. Parameters ---------- variant : Variant raise_on_error : bool Raise an exception if we encounter an error while trying to determine the effect of this variant on a transcript, or simply log the error and continue. """ # if this variant isn't overlapping any genes, return an # Intergenic effect # TODO: look for nearby genes and mark those as Upstream and Downstream # effects if len(variant.gene_ids) == 0: effects = [Intergenic(variant)] else: # list of all MutationEffects for all genes & transcripts effects = [] # group transcripts by their gene ID transcripts_grouped_by_gene = groupby_field(variant.transcripts, 'gene_id') # want effects in the list grouped by the gene they come from for gene_id in sorted(variant.gene_ids): if gene_id not in transcripts_grouped_by_gene: # intragenic variant overlaps a gene but not any transcripts gene = variant.ensembl.gene_by_id(gene_id) effects.append(Intragenic(variant, gene)) else: # gene ID has transcripts overlapped by this variant for transcript in transcripts_grouped_by_gene[gene_id]: if raise_on_error: effect = predict_variant_effect_on_transcript( variant=variant, transcript=transcript) else: effect = predict_variant_effect_on_transcript_or_failure( variant=variant, transcript=transcript) effects.append(effect) return EffectCollection(effects)
python
def predict_variant_effects(variant, raise_on_error=False): """Determine the effects of a variant on any transcripts it overlaps. Returns an EffectCollection object. Parameters ---------- variant : Variant raise_on_error : bool Raise an exception if we encounter an error while trying to determine the effect of this variant on a transcript, or simply log the error and continue. """ # if this variant isn't overlapping any genes, return an # Intergenic effect # TODO: look for nearby genes and mark those as Upstream and Downstream # effects if len(variant.gene_ids) == 0: effects = [Intergenic(variant)] else: # list of all MutationEffects for all genes & transcripts effects = [] # group transcripts by their gene ID transcripts_grouped_by_gene = groupby_field(variant.transcripts, 'gene_id') # want effects in the list grouped by the gene they come from for gene_id in sorted(variant.gene_ids): if gene_id not in transcripts_grouped_by_gene: # intragenic variant overlaps a gene but not any transcripts gene = variant.ensembl.gene_by_id(gene_id) effects.append(Intragenic(variant, gene)) else: # gene ID has transcripts overlapped by this variant for transcript in transcripts_grouped_by_gene[gene_id]: if raise_on_error: effect = predict_variant_effect_on_transcript( variant=variant, transcript=transcript) else: effect = predict_variant_effect_on_transcript_or_failure( variant=variant, transcript=transcript) effects.append(effect) return EffectCollection(effects)
Determine the effects of a variant on any transcripts it overlaps. Returns an EffectCollection object. Parameters ---------- variant : Variant raise_on_error : bool Raise an exception if we encounter an error while trying to determine the effect of this variant on a transcript, or simply log the error and continue.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction.py#L48-L92
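End-to-end usage sketch. The coordinates below are the well-known BRAF V600E position on GRCh37 but are illustrative only; running this needs a pyensembl genome download, and it assumes predict_variant_effects and EffectCollection.top_priority_effect are exposed as in released varcode versions.

from pyensembl import ensembl_grch37
from varcode import Variant
from varcode.effects import predict_variant_effects

variant = Variant(contig=7, start=140453136, ref="A", alt="T",
                  ensembl=ensembl_grch37)
effects = predict_variant_effects(variant)  # same collection as variant.effects()
print(effects.top_priority_effect())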
openvax/varcode
varcode/effects/effect_prediction.py
predict_variant_effect_on_transcript_or_failure
def predict_variant_effect_on_transcript_or_failure(variant, transcript): """ Try predicting the effect of a variant on a particular transcript but suppress raised exceptions by converting them into `Failure` effect values. """ try: return predict_variant_effect_on_transcript( variant=variant, transcript=transcript) except (AssertionError, ValueError) as error: logger.warning( "Encountered error annotating %s for %s: %s", variant, transcript, error) return Failure(variant, transcript)
python
def predict_variant_effect_on_transcript_or_failure(variant, transcript): """ Try predicting the effect of a variant on a particular transcript but suppress raised exceptions by converting them into `Failure` effect values. """ try: return predict_variant_effect_on_transcript( variant=variant, transcript=transcript) except (AssertionError, ValueError) as error: logger.warning( "Encountered error annotating %s for %s: %s", variant, transcript, error) return Failure(variant, transcript)
Try predicting the effect of a variant on a particular transcript but suppress raised exceptions by converting them into `Failure` effect values.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction.py#L95-L111
openvax/varcode
varcode/effects/effect_prediction.py
predict_variant_effect_on_transcript
def predict_variant_effect_on_transcript(variant, transcript): """Return the transcript effect (such as FrameShift) that results from applying this genomic variant to a particular transcript. Parameters ---------- variant : Variant transcript : Transcript Transcript we're going to apply mutation to. """ if transcript.__class__ is not Transcript: raise TypeError( "Expected %s : %s to have type Transcript" % ( transcript, type(transcript))) # check for non-coding transcripts first, since # every non-coding transcript is "incomplete". if not transcript.is_protein_coding: return NoncodingTranscript(variant, transcript) if not transcript.complete: return IncompleteTranscript(variant, transcript) # since we're using inclusive base-1 coordinates, # checking for overlap requires special logic for insertions is_insertion = variant.is_insertion # determine if any exons are deleted, and if not, # what is the closest exon and how far is this variant # from that exon (overlapping the exon = 0 distance) completely_lost_exons = [] # list of which (exon #, Exon) pairs this mutation overlaps overlapping_exon_numbers_and_exons = [] distance_to_nearest_exon = float("inf") start_in_exon = False end_in_exon = False nearest_exon = None variant_start = variant.trimmed_base1_start variant_end = variant.trimmed_base1_end for i, exon in enumerate(transcript.exons): if variant_start <= exon.start and variant_end >= exon.end: completely_lost_exons.append(exon) if is_insertion and exon.strand == "+" and variant_end == exon.end: # insertions after an exon don't overlap the exon distance = 1 elif is_insertion and exon.strand == "-" and variant_start == exon.start: distance = 1 else: distance = exon.distance_to_interval(variant_start, variant_end) if distance == 0: overlapping_exon_numbers_and_exons.append((i + 1, exon)) # start is contained in current exon if exon.start <= variant_start <= exon.end: start_in_exon = True # end is contained in current exon if exon.end >= variant_end >= exon.start: end_in_exon = True elif distance < distance_to_nearest_exon: distance_to_nearest_exon = distance nearest_exon = exon if len(overlapping_exon_numbers_and_exons) == 0: intronic_effect_class = choose_intronic_effect_class( variant=variant, nearest_exon=nearest_exon, distance_to_exon=distance_to_nearest_exon) return intronic_effect_class( variant=variant, transcript=transcript, nearest_exon=nearest_exon, distance_to_exon=distance_to_nearest_exon) elif len(completely_lost_exons) > 0 or ( len(overlapping_exon_numbers_and_exons) > 1): # if spanning multiple exons, or completely deleted an exon # then consider that an ExonLoss mutation exons = [exon for (_, exon) in overlapping_exon_numbers_and_exons] return ExonLoss(variant, transcript, exons) assert len(overlapping_exon_numbers_and_exons) == 1 exon_number, exon = overlapping_exon_numbers_and_exons[0] exonic_effect_annotation = exonic_transcript_effect( variant, exon, exon_number, transcript) # simple case: both start and end are in the same exon if start_in_exon and end_in_exon: return exonic_effect_annotation elif isinstance(exonic_effect_annotation, ExonicSpliceSite): # if mutation bleeds over into an intron but even just # the exonic portion got annotated as an exonic splice site # then return it return exonic_effect_annotation return ExonicSpliceSite( variant=variant, transcript=transcript, exon=exon, alternate_effect=exonic_effect_annotation)
python
def predict_variant_effect_on_transcript(variant, transcript): """Return the transcript effect (such as FrameShift) that results from applying this genomic variant to a particular transcript. Parameters ---------- variant : Variant transcript : Transcript Transcript we're going to apply mutation to. """ if transcript.__class__ is not Transcript: raise TypeError( "Expected %s : %s to have type Transcript" % ( transcript, type(transcript))) # check for non-coding transcripts first, since # every non-coding transcript is "incomplete". if not transcript.is_protein_coding: return NoncodingTranscript(variant, transcript) if not transcript.complete: return IncompleteTranscript(variant, transcript) # since we're using inclusive base-1 coordinates, # checking for overlap requires special logic for insertions is_insertion = variant.is_insertion # determine if any exons are deleted, and if not, # what is the closest exon and how far is this variant # from that exon (overlapping the exon = 0 distance) completely_lost_exons = [] # list of which (exon #, Exon) pairs this mutation overlaps overlapping_exon_numbers_and_exons = [] distance_to_nearest_exon = float("inf") start_in_exon = False end_in_exon = False nearest_exon = None variant_start = variant.trimmed_base1_start variant_end = variant.trimmed_base1_end for i, exon in enumerate(transcript.exons): if variant_start <= exon.start and variant_end >= exon.end: completely_lost_exons.append(exon) if is_insertion and exon.strand == "+" and variant_end == exon.end: # insertions after an exon don't overlap the exon distance = 1 elif is_insertion and exon.strand == "-" and variant_start == exon.start: distance = 1 else: distance = exon.distance_to_interval(variant_start, variant_end) if distance == 0: overlapping_exon_numbers_and_exons.append((i + 1, exon)) # start is contained in current exon if exon.start <= variant_start <= exon.end: start_in_exon = True # end is contained in current exon if exon.end >= variant_end >= exon.start: end_in_exon = True elif distance < distance_to_nearest_exon: distance_to_nearest_exon = distance nearest_exon = exon if len(overlapping_exon_numbers_and_exons) == 0: intronic_effect_class = choose_intronic_effect_class( variant=variant, nearest_exon=nearest_exon, distance_to_exon=distance_to_nearest_exon) return intronic_effect_class( variant=variant, transcript=transcript, nearest_exon=nearest_exon, distance_to_exon=distance_to_nearest_exon) elif len(completely_lost_exons) > 0 or ( len(overlapping_exon_numbers_and_exons) > 1): # if spanning multiple exons, or completely deleted an exon # then consider that an ExonLoss mutation exons = [exon for (_, exon) in overlapping_exon_numbers_and_exons] return ExonLoss(variant, transcript, exons) assert len(overlapping_exon_numbers_and_exons) == 1 exon_number, exon = overlapping_exon_numbers_and_exons[0] exonic_effect_annotation = exonic_transcript_effect( variant, exon, exon_number, transcript) # simple case: both start and end are in the same exon if start_in_exon and end_in_exon: return exonic_effect_annotation elif isinstance(exonic_effect_annotation, ExonicSpliceSite): # if mutation bleeds over into an intron but even just # the exonic portion got annotated as an exonic splice site # then return it return exonic_effect_annotation return ExonicSpliceSite( variant=variant, transcript=transcript, exon=exon, alternate_effect=exonic_effect_annotation)
Return the transcript effect (such as FrameShift) that results from applying this genomic variant to a particular transcript. Parameters ---------- variant : Variant transcript : Transcript Transcript we're going to apply mutation to.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction.py#L114-L220
openvax/varcode
varcode/effects/effect_prediction.py
choose_intronic_effect_class
def choose_intronic_effect_class( variant, nearest_exon, distance_to_exon): """ Infer effect of variant which does not overlap any exon of the given transcript. """ assert distance_to_exon > 0, \ "Expected intronic effect to have distance_to_exon > 0, got %d" % ( distance_to_exon,) if nearest_exon.strand == "+": # if exon on positive strand start_before = variant.trimmed_base1_start < nearest_exon.start start_same = variant.trimmed_base1_start == nearest_exon.start before_exon = start_before or (variant.is_insertion and start_same) else: # if exon on negative strand end_after = variant.trimmed_base1_end > nearest_exon.end end_same = variant.trimmed_base1_end == nearest_exon.end before_exon = end_after or (variant.is_insertion and end_same) # distance cutoffs based on consensus splice sequences from # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/ # 5' splice site: MAG|GURAGU consensus # M is A or C; R is purine; | is the exon-intron boundary # 3' splice site: YAG|R if distance_to_exon <= 2: if before_exon: # 2 last nucleotides of intron before exon are the splice # acceptor site, typically "AG" return SpliceAcceptor else: # 2 first nucleotides of intron after exon are the splice donor # site, typically "GT" return SpliceDonor elif not before_exon and distance_to_exon <= 6: # variants in nucleotides 3-6 at start of intron aren't as certain # to cause problems as nucleotides 1-2 but still implicated in # alternative splicing return IntronicSpliceSite elif before_exon and distance_to_exon <= 3: # nucleotide -3 before exon is part of the 3' splicing # motif but allows for more degeneracy than the -2, -1 nucleotides return IntronicSpliceSite else: # intronic mutation unrelated to splicing return Intronic
python
def choose_intronic_effect_class( variant, nearest_exon, distance_to_exon): """ Infer effect of variant which does not overlap any exon of the given transcript. """ assert distance_to_exon > 0, \ "Expected intronic effect to have distance_to_exon > 0, got %d" % ( distance_to_exon,) if nearest_exon.strand == "+": # if exon on positive strand start_before = variant.trimmed_base1_start < nearest_exon.start start_same = variant.trimmed_base1_start == nearest_exon.start before_exon = start_before or (variant.is_insertion and start_same) else: # if exon on negative strand end_after = variant.trimmed_base1_end > nearest_exon.end end_same = variant.trimmed_base1_end == nearest_exon.end before_exon = end_after or (variant.is_insertion and end_same) # distance cutoffs based on consensus splice sequences from # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/ # 5' splice site: MAG|GURAGU consensus # M is A or C; R is purine; | is the exon-intron boundary # 3' splice site: YAG|R if distance_to_exon <= 2: if before_exon: # 2 last nucleotides of intron before exon are the splice # acceptor site, typically "AG" return SpliceAcceptor else: # 2 first nucleotides of intron after exon are the splice donor # site, typically "GT" return SpliceDonor elif not before_exon and distance_to_exon <= 6: # variants in nucleotides 3-6 at start of intron aren't as certain # to cause problems as nucleotides 1-2 but still implicated in # alternative splicing return IntronicSpliceSite elif before_exon and distance_to_exon <= 3: # nucleotide -3 before exon is part of the 3' splicing # motif but allows for more degeneracy than the -2, -1 nucleotides return IntronicSpliceSite else: # intronic mutation unrelated to splicing return Intronic
Infer effect of variant which does not overlap any exon of the given transcript.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction.py#L223-L271
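The distance cutoffs amount to a small lookup. A standalone restatement of the rule with string labels instead of effect classes (a hypothetical helper, not part of varcode):

def intronic_label(distance_to_exon, before_exon):
    # Mirrors the cutoffs above: 1-2bp from the exon is a canonical
    # splice site, then a looser window on either side, then plain intronic.
    if distance_to_exon <= 2:
        return "SpliceAcceptor" if before_exon else "SpliceDonor"
    if (not before_exon and distance_to_exon <= 6) or \
       (before_exon and distance_to_exon <= 3):
        return "IntronicSpliceSite"
    return "Intronic"

assert intronic_label(2, before_exon=True) == "SpliceAcceptor"
assert intronic_label(5, before_exon=False) == "IntronicSpliceSite"
assert intronic_label(5, before_exon=True) == "Intronic"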
openvax/varcode
varcode/effects/effect_prediction.py
exonic_transcript_effect
def exonic_transcript_effect(variant, exon, exon_number, transcript): """Effect of this variant on a Transcript, assuming we already know that this variant overlaps some exon of the transcript. Parameters ---------- variant : Variant exon : pyensembl.Exon Exon which this variant overlaps exon_number : int Index (starting from 1) of the given exon in the transcript's sequence of exons. transcript : pyensembl.Transcript """ genome_ref = variant.trimmed_ref genome_alt = variant.trimmed_alt variant_start = variant.trimmed_base1_start variant_end = variant.trimmed_base1_end # clip mutation to only affect the current exon if variant_start < exon.start: # if mutation starts before current exon then only look # at nucleotides which overlap the exon logger.info('Mutation in variant %s starts before exon %s', variant, exon) assert len(genome_ref) > 0, "Unexpected insertion into intron" n_skip_start = exon.start - variant_start genome_ref = genome_ref[n_skip_start:] genome_alt = genome_alt[n_skip_start:] genome_start = exon.start else: genome_start = variant_start if variant_end > exon.end: # if mutation goes past exon end then only look at nucleotides # which overlap the exon logger.info('Mutation in variant %s ends after exon %s', variant, exon) n_skip_end = variant_end - exon.end genome_ref = genome_ref[:-n_skip_end] genome_alt = genome_alt[:len(genome_ref)] genome_end = exon.end else: genome_end = variant_end transcript_offset = interval_offset_on_transcript( genome_start, genome_end, transcript) if transcript.on_backward_strand: cdna_ref = reverse_complement(genome_ref) cdna_alt = reverse_complement(genome_alt) else: cdna_ref = genome_ref cdna_alt = genome_alt n_ref = len(cdna_ref) expected_ref = str( transcript.sequence[transcript_offset:transcript_offset + n_ref]) if cdna_ref != expected_ref: raise ValueError( ("Found ref nucleotides '%s' in sequence" " of %s at offset %d (chromosome positions %d:%d)" " but variant %s has '%s'") % ( expected_ref, transcript, transcript_offset, genome_start, genome_end, variant, cdna_ref)) utr5_length = min(transcript.start_codon_spliced_offsets) # does the variant start inside the 5' UTR? if utr5_length > transcript_offset: # does the variant end after the 5' UTR, within the coding region? if utr5_length < transcript_offset + n_ref: # TODO: we *might* lose the Kozak sequence or the start codon # but without looking at the modified sequence how can we tell # for sure that this is a start-loss variant? return StartLoss(variant, transcript) else: # if variant contained within 5' UTR return FivePrimeUTR(variant, transcript) utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1 if transcript_offset >= utr3_offset: return ThreePrimeUTR(variant, transcript) exon_start_offset = interval_offset_on_transcript( exon.start, exon.end, transcript) exon_end_offset = exon_start_offset + len(exon) - 1 # Further below we're going to try to predict exonic splice site # modifications, which will take this effect_annotation as their # alternative hypothesis for what happens if splicing doesn't change. # If the mutation doesn't affect an exonic splice site, then # we'll just return this effect. coding_effect_annotation = predict_variant_coding_effect_on_transcript( variant=variant, transcript=transcript, trimmed_cdna_ref=cdna_ref, trimmed_cdna_alt=cdna_alt, transcript_offset=transcript_offset) if changes_exonic_splice_site( transcript=transcript, transcript_ref=cdna_ref, transcript_alt=cdna_alt, transcript_offset=transcript_offset, exon_start_offset=exon_start_offset, exon_end_offset=exon_end_offset, exon_number=exon_number): return ExonicSpliceSite( variant=variant, transcript=transcript, exon=exon, alternate_effect=coding_effect_annotation) return coding_effect_annotation
python
def exonic_transcript_effect(variant, exon, exon_number, transcript): """Effect of this variant on a Transcript, assuming we already know that this variant overlaps some exon of the transcript. Parameters ---------- variant : Variant exon : pyensembl.Exon Exon which this variant overlaps exon_number : int Index (starting from 1) of the given exon in the transcript's sequence of exons. transcript : pyensembl.Transcript """ genome_ref = variant.trimmed_ref genome_alt = variant.trimmed_alt variant_start = variant.trimmed_base1_start variant_end = variant.trimmed_base1_end # clip mutation to only affect the current exon if variant_start < exon.start: # if mutation starts before current exon then only look # at nucleotides which overlap the exon logger.info('Mutation in variant %s starts before exon %s', variant, exon) assert len(genome_ref) > 0, "Unexpected insertion into intron" n_skip_start = exon.start - variant_start genome_ref = genome_ref[n_skip_start:] genome_alt = genome_alt[n_skip_start:] genome_start = exon.start else: genome_start = variant_start if variant_end > exon.end: # if mutation goes past exon end then only look at nucleotides # which overlap the exon logger.info('Mutation in variant %s ends after exon %s', variant, exon) n_skip_end = variant_end - exon.end genome_ref = genome_ref[:-n_skip_end] genome_alt = genome_alt[:len(genome_ref)] genome_end = exon.end else: genome_end = variant_end transcript_offset = interval_offset_on_transcript( genome_start, genome_end, transcript) if transcript.on_backward_strand: cdna_ref = reverse_complement(genome_ref) cdna_alt = reverse_complement(genome_alt) else: cdna_ref = genome_ref cdna_alt = genome_alt n_ref = len(cdna_ref) expected_ref = str( transcript.sequence[transcript_offset:transcript_offset + n_ref]) if cdna_ref != expected_ref: raise ValueError( ("Found ref nucleotides '%s' in sequence" " of %s at offset %d (chromosome positions %d:%d)" " but variant %s has '%s'") % ( expected_ref, transcript, transcript_offset, genome_start, genome_end, variant, cdna_ref)) utr5_length = min(transcript.start_codon_spliced_offsets) # does the variant start inside the 5' UTR? if utr5_length > transcript_offset: # does the variant end after the 5' UTR, within the coding region? if utr5_length < transcript_offset + n_ref: # TODO: we *might* lose the Kozak sequence or the start codon # but without looking at the modified sequence how can we tell # for sure that this is a start-loss variant? return StartLoss(variant, transcript) else: # if variant contained within 5' UTR return FivePrimeUTR(variant, transcript) utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1 if transcript_offset >= utr3_offset: return ThreePrimeUTR(variant, transcript) exon_start_offset = interval_offset_on_transcript( exon.start, exon.end, transcript) exon_end_offset = exon_start_offset + len(exon) - 1 # Further below we're going to try to predict exonic splice site # modifications, which will take this effect_annotation as their # alternative hypothesis for what happens if splicing doesn't change. # If the mutation doesn't affect an exonic splice site, then # we'll just return this effect. coding_effect_annotation = predict_variant_coding_effect_on_transcript( variant=variant, transcript=transcript, trimmed_cdna_ref=cdna_ref, trimmed_cdna_alt=cdna_alt, transcript_offset=transcript_offset) if changes_exonic_splice_site( transcript=transcript, transcript_ref=cdna_ref, transcript_alt=cdna_alt, transcript_offset=transcript_offset, exon_start_offset=exon_start_offset, exon_end_offset=exon_end_offset, exon_number=exon_number): return ExonicSpliceSite( variant=variant, transcript=transcript, exon=exon, alternate_effect=coding_effect_annotation) return coding_effect_annotation
Effect of this variant on a Transcript, assuming we already know that this variant overlaps some exon of the transcript. Parameters ---------- variant : Variant exon : pyensembl.Exon Exon which this variant overlaps exon_number : int Index (starting from 1) of the given exon in the transcript's sequence of exons. transcript : pyensembl.Transcript
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction.py#L274-L397
openvax/varcode
varcode/common.py
apply_groupby
def apply_groupby(records, fn, skip_none=False): """ Given a list of objects, group them into a dictionary by applying fn to each one and using returned values as a dictionary key. Parameters ---------- records : list fn : function skip_none : bool If False, then None can be a key in the returned dictionary, otherwise records whose key value is None get skipped. Returns dict. """ # create an empty list for every new key groups = defaultdict(list) for record in records: value = fn(record) if value is not None or not skip_none: groups[value].append(record) return dict(groups)
python
def apply_groupby(records, fn, skip_none=False): """ Given a list of objects, group them into a dictionary by applying fn to each one and using returned values as a dictionary key. Parameters ---------- records : list fn : function skip_none : bool If False, then None can be a key in the returned dictionary, otherwise records whose key value is None get skipped. Returns dict. """ # create an empty list for every new key groups = defaultdict(list) for record in records: value = fn(record) if value is not None or not skip_none: groups[value].append(record) return dict(groups)
Given a list of objects, group them into a dictionary by applying fn to each one and using returned values as a dictionary key. Parameters ---------- records : list fn : function skip_none : bool If False, then None can be a key in the returned dictionary, otherwise records whose key value is None get skipped. Returns dict.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/common.py#L21-L46
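Quick usage check on plain Python objects; apply_groupby is pure, so no genome data is needed (import path taken from the record above):

from varcode.common import apply_groupby

words = ["apple", "avocado", "banana", "cherry"]
by_initial = apply_groupby(words, lambda w: w[0])
# {'a': ['apple', 'avocado'], 'b': ['banana'], 'c': ['cherry']}
assert by_initial["a"] == ["apple", "avocado"]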
openvax/varcode
varcode/common.py
groupby_field
def groupby_field(records, field_name, skip_none=True): """ Given a list of objects, group them into a dictionary by the unique values of a given field name. """ return apply_groupby( records, lambda obj: getattr(obj, field_name), skip_none=skip_none)
python
def groupby_field(records, field_name, skip_none=True): """ Given a list of objects, group them into a dictionary by the unique values of a given field name. """ return apply_groupby( records, lambda obj: getattr(obj, field_name), skip_none=skip_none)
Given a list of objects, group them into a dictionary by the unique values of a given field name.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/common.py#L49-L57
openvax/varcode
varcode/common.py
memoize
def memoize(fn): """ Simple memoization decorator for functions and methods, assumes that all arguments to the function can be hashed and compared. """ memoized_values = {} @wraps(fn) def wrapped_fn(*args, **kwargs): key = (args, tuple(sorted(kwargs.items()))) try: return memoized_values[key] except KeyError: memoized_values[key] = fn(*args, **kwargs) return memoized_values[key] return wrapped_fn
python
def memoize(fn): """ Simple memoization decorator for functions and methods, assumes that all arguments to the function can be hashed and compared. """ memoized_values = {} @wraps(fn) def wrapped_fn(*args, **kwargs): key = (args, tuple(sorted(kwargs.items()))) try: return memoized_values[key] except KeyError: memoized_values[key] = fn(*args, **kwargs) return memoized_values[key] return wrapped_fn
Simple memoization decorator for functions and methods, assumes that all arguments to the function can be hashed and compared.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/common.py#L60-L77
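Usage sketch; the sleep is just a stand-in for an expensive computation, and all arguments must be hashable per the docstring above:

import time
from varcode.common import memoize

@memoize
def slow_square(x):
    time.sleep(0.1)  # simulated expensive work
    return x * x

slow_square(12)  # computed once
slow_square(12)  # served from the cache on repeat calls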
openvax/varcode
varcode/effects/transcript_helpers.py
interval_offset_on_transcript
def interval_offset_on_transcript(start, end, transcript): """ Given a chromosomal interval [start:end] and a particular transcript, return the earliest offset of the interval (clipped to the transcript's bounds) within the spliced transcript sequence. """ # ensure that start_pos:end_pos overlap with transcript positions if start > end: raise ValueError( "start_pos %d shouldn't be greater than end_pos %d" % ( start, end)) if start > transcript.end: raise ValueError( "Range %d:%d starts after transcript %s (%d:%d)" % ( start, end, transcript, transcript.start, transcript.end)) if end < transcript.start: raise ValueError( "Range %d:%d ends before transcript %s (%d:%d)" % ( start, end, transcript, transcript.start, transcript.end)) # trim the start position to the beginning of the transcript if start < transcript.start: start = transcript.start # trim the end position to the end of the transcript if end > transcript.end: end = transcript.end # return earliest offset into the spliced transcript return min( transcript.spliced_offset(start), transcript.spliced_offset(end))
python
def interval_offset_on_transcript(start, end, transcript): """ Given a chromosomal interval [start:end] and a particular transcript, return the earliest offset of the interval (clipped to the transcript's bounds) within the spliced transcript sequence. """ # ensure that start_pos:end_pos overlap with transcript positions if start > end: raise ValueError( "start_pos %d shouldn't be greater than end_pos %d" % ( start, end)) if start > transcript.end: raise ValueError( "Range %d:%d starts after transcript %s (%d:%d)" % ( start, end, transcript, transcript.start, transcript.end)) if end < transcript.start: raise ValueError( "Range %d:%d ends before transcript %s (%d:%d)" % ( start, end, transcript, transcript.start, transcript.end)) # trim the start position to the beginning of the transcript if start < transcript.start: start = transcript.start # trim the end position to the end of the transcript if end > transcript.end: end = transcript.end # return earliest offset into the spliced transcript return min( transcript.spliced_offset(start), transcript.spliced_offset(end))
Given a chromosomal interval [start:end] and a particular transcript, return the earliest offset of the interval (clipped to the transcript's bounds) within the spliced transcript sequence.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/transcript_helpers.py#L18-L54
openvax/varcode
varcode/vcf_output.py
variants_to_vcf
def variants_to_vcf(variants, variant_to_metadata, out=sys.stdout):
    """Output a VCF file from a list of Variant records.

    Parameters
    ----------
    variants : iterable
        Variant objects

    variant_to_metadata : dict
        Dictionary mapping each variant in `variants` to a dictionary
        of metadata.
    """
    # TODO: The variant metadata dictionary (in the `VariantCollection`)
    # contains different data depending on the original input file format (VCF,
    # MAF, CSV). It's easy to output variants originally in VCF format, but we
    # might want to consider how fields from MAF (for example) map to those in
    # VCF.

    # TODO: The VCF file we output doesn't contain any VCF metadata headers, as
    # the original headers were thrown away when the VCF file was parsed. We
    # may want to keep some of that information and/or infer some of the
    # headers based on the variants themselves. The former is difficult because
    # merge conflicts will inevitably occur; the latter is difficult because
    # the variants themselves don't contain all the information required for
    # these metadata headers (e.g., descriptions).
    #
    # As a side note, adding headers for certain fields will make them parse
    # correctly -- into an integer instead of a string (the default), for
    # example.

    # TODO: If we maintain headers (see above TODO), what should happen if
    # variant sources use different reference genomes?
    #
    # If we don't maintain headers, what should the default reference genome
    # be? This code chose one fairly arbitrarily.

    # TODO: If we end up needing more functions to "build" VCF record fields
    # (see functions below), we may want to abstract away the individual
    # functions and create a mapping from field to format function.

    def get_metadata_field(key, variant, default='.'):
        """Retrieve field from variant metadata dictionary."""
        val = variant_to_metadata[variant].get(key)
        if val is None:
            return default
        return val

    def build_filter_field(variant):
        """Build the filter field from the given variant.

        The `filter` field, as stored in the variants metadata dictionary,
        comes in 3 flavors:

            - empty list: 1+ filters were run and none failed
            - non-empty list: 1+ filters were run and 1+ failed
            - `None`: no filters were run

        This function maps each of these internal representations to their
        corresponding VCF representations.
        """
        filter_metadata = get_metadata_field('filter', variant)
        if type(filter_metadata) == list:
            return 'PASS' if filter_metadata == [] else ';'.join(filter_metadata)
        else:
            # TODO: Can the filter field ever be something other than the 3
            # possibilities described in this function's doc comment?
            bad_value_msg = (
                'Filter metadata field took on unexpected value `{}`. Update '
                'code to account for this value.').format(str(filter_metadata))
            assert filter_metadata == '.', bad_value_msg
            return filter_metadata

    def build_info_field(variant):
        """Build the info field from the given variant.

        Format is `<key>=<val>,...;<key>=<val>,...;<key>=<val>,...`.
        """
        def build_info_pair(key, val):
            """Build key/val pair for info field."""
            # Note: Different from `val == True`, which returns True when `val == 1`.
            if val is True:
                return key

            if type(val) == list:
                val = ','.join(map(str, val))
            else:
                val = str(val)
            return '{}={}'.format(key, val)

        info_dict = get_metadata_field('info', variant, default={})
        if not info_dict:
            return '.'

        return ';'.join(build_info_pair(k, v) for (k, v) in info_dict.items())

    def build_format_field(variant):
        """Build the sample format string from the given variant.

        Each sample column follows this format for the specified variant.
        """
        sample_info = get_metadata_field('sample_info', variant, default={})
        return ':'.join(list(sample_info.values())[0].keys()) if sample_info else '.'

    def build_sample_fields(variant):
        """Build the sample fields for the given variant."""
        def build_sample_field(sample):
            """Build a specific sample's field (i.e., one sample column)."""
            sample_vals = sample_info[sample].values()
            return ':'.join(build_sample_val(val) for val in sample_vals)

        def build_sample_val(sample_val):
            """Build a specific value for a sample (i.e., one value for one column)."""
            if type(sample_val) is list:
                return ','.join(map(str, sample_val))
            elif sample_val is not None:
                return str(sample_val)
            else:
                return '.'

        sample_info = get_metadata_field('sample_info', variant, default={})
        return list(build_sample_field(sample) for sample in sample_info)

    def build_vcf_record(variant, add_sample_info):
        """Return a list of all the variant's VCF record fields."""
        record = [
            str(variant.original_contig),
            str(variant.original_start),
            get_metadata_field('id', variant),
            variant.original_ref,
            variant.original_alt,
            str(get_metadata_field('qual', variant)),
            build_filter_field(variant),
            build_info_field(variant),
        ]
        if add_sample_info:
            record.append(build_format_field(variant))
            record.extend(build_sample_fields(variant))
        return record

    def merge_duplicate_variants():
        """Merge duplicate variants (according to ID) and return *list* of
        merged, sorted variants.

        Multiple `Variant`s can have the same VCF id (e.g., those variants
        which originally had > 1 alternate base), but we can't output a VCF
        file with multiple records having the same id. This function merges
        those duplicate variants.
        """
        def construct_id2variants():
            """Construct dictionary which maps variant IDs to `Variant`s."""
            id2variants = defaultdict(list)
            for variant in variants:
                # Note: For variants without IDs (e.g., those that came from
                # MAF files), we assign the missing value.
                variant_id = get_metadata_field('id', variant)
                id2variants[variant_id].append(variant)
            return id2variants

        def merge_variant_list(duplicates):
            """Merge duplicate variant list into one."""
            # TODO: Currently assumes that only alternate bases differ, but we
            # may want or need to merge variants that have the same ID but
            # different contigs, positions, or reference bases. If merging
            # isn't possible (which I think is the case for many situations),
            # it may be easiest to assign the "missing value" to the IDs and
            # move on.
            assert len(duplicates) > 0
            assert all(v.original_contig == duplicates[0].original_contig and
                       v.original_start == duplicates[0].original_start and
                       v.original_ref == duplicates[0].original_ref
                       for v in duplicates)
            import copy
            merged = copy.copy(duplicates[0])
            merged.original_alt = ','.join(
                duplicate.original_alt for duplicate in duplicates)
            return merged

        id2variants = construct_id2variants()
        variants_no_id = id2variants.pop('.', [])  # don't want to merge variants w/ no id
        merged_variants = list(map(merge_variant_list, id2variants.values())) + variants_no_id

        # groups variants by contig; relative ordering of contigs doesn't matter
        return sorted(
            merged_variants,
            key=lambda v: (str(v.original_contig), str(v.original_start)))

    def get_sample_names():
        """Return the sample names for all variants."""
        # TODO: For now, ensures that every variant has the same samples. If
        # they didn't, we'd have to use the missing value for VCF records with
        # no data on a given sample. In and of itself, that isn't a problem
        # until the format field contains GT (genotype), which is required for
        # all samples if present in the format field.
        #
        # A couple of ways to handle this:
        #
        #   1) Ignore this requirement of VCF files. I'd assume this would
        #      still be compatible with varcode, but may not be with other
        #      VCF parsers.
        #   2) Remove the GT field from those records where this rule would
        #      be violated. This is sad because we lose information.
        #
        # It's also important to note that if/when we combine all samples,
        # we'll have to add a `.` (missing value) for those samples in which
        # a variant has no data. Check the VCF spec to make sure this is
        # valid; if not, we may have to write `.:.:.:.` -- one `.` for each
        # field in the format string.
        #
        # Moreover, we'll want to note whether we care about maintaining the
        # relative ordering of the sample names in the original VCF files.
        # This probably isn't important, but the code below does not do this
        # (it effectively alphabetizes the sample names because of the use of
        # `set`).
        sample_names = set()
        for variant in variants:
            sample_info = get_metadata_field('sample_info', variant, default={})
            addl_sample_names = set(sample_info.keys())

            # Ensures all variants have the same samples.
            if sample_names and sample_names != addl_sample_names:
                return []

            sample_names.update(addl_sample_names)

        return list(sample_names)

    headers = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']
    sample_names = get_sample_names()
    if sample_names:
        headers += ['FORMAT'] + sample_names

    unique_variants_list = merge_duplicate_variants()

    # usually we have just one reference genome for the whole variant collection
    # but if variants from multiple sources have been merged then we might
    # not be able to write out a VCF since the individual variants may be using
    # different coordinate systems
    genome_names = list({v.ensembl.reference_name for v in unique_variants_list})
    if len(genome_names) > 1:
        raise ValueError(
            "Cannot create VCF for variants with multiple reference genomes: %s" % (
                genome_names,))
    genome_name = genome_names[0]
    print('##fileformat=VCFv4.2', file=out)
    print('##reference=%s' % genome_name, file=out)
    print('\t'.join(headers), file=out)

    for variant in unique_variants_list:
        record = build_vcf_record(variant, add_sample_info=bool(sample_names))
        print('\t'.join(record), file=out)
python
def variants_to_vcf(variants, variant_to_metadata, out=sys.stdout):
    """Output a VCF file from a list of Variant records.

    Parameters
    ----------
    variants : iterable
        Variant objects

    variant_to_metadata : dict
        Dictionary mapping each variant in `variants` to a dictionary
        of metadata.
    """
    # TODO: The variant metadata dictionary (in the `VariantCollection`)
    # contains different data depending on the original input file format (VCF,
    # MAF, CSV). It's easy to output variants originally in VCF format, but we
    # might want to consider how fields from MAF (for example) map to those in
    # VCF.

    # TODO: The VCF file we output doesn't contain any VCF metadata headers, as
    # the original headers were thrown away when the VCF file was parsed. We
    # may want to keep some of that information and/or infer some of the
    # headers based on the variants themselves. The former is difficult because
    # merge conflicts will inevitably occur; the latter is difficult because
    # the variants themselves don't contain all the information required for
    # these metadata headers (e.g., descriptions).
    #
    # As a side note, adding headers for certain fields will make them parse
    # correctly -- into an integer instead of a string (the default), for
    # example.

    # TODO: If we maintain headers (see above TODO), what should happen if
    # variant sources use different reference genomes?
    #
    # If we don't maintain headers, what should the default reference genome
    # be? This code chose one fairly arbitrarily.

    # TODO: If we end up needing more functions to "build" VCF record fields
    # (see functions below), we may want to abstract away the individual
    # functions and create a mapping from field to format function.

    def get_metadata_field(key, variant, default='.'):
        """Retrieve field from variant metadata dictionary."""
        val = variant_to_metadata[variant].get(key)
        if val is None:
            return default
        return val

    def build_filter_field(variant):
        """Build the filter field from the given variant.

        The `filter` field, as stored in the variants metadata dictionary,
        comes in 3 flavors:

            - empty list: 1+ filters were run and none failed
            - non-empty list: 1+ filters were run and 1+ failed
            - `None`: no filters were run

        This function maps each of these internal representations to their
        corresponding VCF representations.
        """
        filter_metadata = get_metadata_field('filter', variant)
        if type(filter_metadata) == list:
            return 'PASS' if filter_metadata == [] else ';'.join(filter_metadata)
        else:
            # TODO: Can the filter field ever be something other than the 3
            # possibilities described in this function's doc comment?
            bad_value_msg = (
                'Filter metadata field took on unexpected value `{}`. Update '
                'code to account for this value.').format(str(filter_metadata))
            assert filter_metadata == '.', bad_value_msg
            return filter_metadata

    def build_info_field(variant):
        """Build the info field from the given variant.

        Format is `<key>=<val>,...;<key>=<val>,...;<key>=<val>,...`.
        """
        def build_info_pair(key, val):
            """Build key/val pair for info field."""
            # Note: Different from `val == True`, which returns True when `val == 1`.
            if val is True:
                return key

            if type(val) == list:
                val = ','.join(map(str, val))
            else:
                val = str(val)
            return '{}={}'.format(key, val)

        info_dict = get_metadata_field('info', variant, default={})
        if not info_dict:
            return '.'

        return ';'.join(build_info_pair(k, v) for (k, v) in info_dict.items())

    def build_format_field(variant):
        """Build the sample format string from the given variant.

        Each sample column follows this format for the specified variant.
        """
        sample_info = get_metadata_field('sample_info', variant, default={})
        return ':'.join(list(sample_info.values())[0].keys()) if sample_info else '.'

    def build_sample_fields(variant):
        """Build the sample fields for the given variant."""
        def build_sample_field(sample):
            """Build a specific sample's field (i.e., one sample column)."""
            sample_vals = sample_info[sample].values()
            return ':'.join(build_sample_val(val) for val in sample_vals)

        def build_sample_val(sample_val):
            """Build a specific value for a sample (i.e., one value for one column)."""
            if type(sample_val) is list:
                return ','.join(map(str, sample_val))
            elif sample_val is not None:
                return str(sample_val)
            else:
                return '.'

        sample_info = get_metadata_field('sample_info', variant, default={})
        return list(build_sample_field(sample) for sample in sample_info)

    def build_vcf_record(variant, add_sample_info):
        """Return a list of all the variant's VCF record fields."""
        record = [
            str(variant.original_contig),
            str(variant.original_start),
            get_metadata_field('id', variant),
            variant.original_ref,
            variant.original_alt,
            str(get_metadata_field('qual', variant)),
            build_filter_field(variant),
            build_info_field(variant),
        ]
        if add_sample_info:
            record.append(build_format_field(variant))
            record.extend(build_sample_fields(variant))
        return record

    def merge_duplicate_variants():
        """Merge duplicate variants (according to ID) and return *list* of
        merged, sorted variants.

        Multiple `Variant`s can have the same VCF id (e.g., those variants
        which originally had > 1 alternate base), but we can't output a VCF
        file with multiple records having the same id. This function merges
        those duplicate variants.
        """
        def construct_id2variants():
            """Construct dictionary which maps variant IDs to `Variant`s."""
            id2variants = defaultdict(list)
            for variant in variants:
                # Note: For variants without IDs (e.g., those that came from
                # MAF files), we assign the missing value.
                variant_id = get_metadata_field('id', variant)
                id2variants[variant_id].append(variant)
            return id2variants

        def merge_variant_list(duplicates):
            """Merge duplicate variant list into one."""
            # TODO: Currently assumes that only alternate bases differ, but we
            # may want or need to merge variants that have the same ID but
            # different contigs, positions, or reference bases. If merging
            # isn't possible (which I think is the case for many situations),
            # it may be easiest to assign the "missing value" to the IDs and
            # move on.
            assert len(duplicates) > 0
            assert all(v.original_contig == duplicates[0].original_contig and
                       v.original_start == duplicates[0].original_start and
                       v.original_ref == duplicates[0].original_ref
                       for v in duplicates)
            import copy
            merged = copy.copy(duplicates[0])
            merged.original_alt = ','.join(
                duplicate.original_alt for duplicate in duplicates)
            return merged

        id2variants = construct_id2variants()
        variants_no_id = id2variants.pop('.', [])  # don't want to merge variants w/ no id
        merged_variants = list(map(merge_variant_list, id2variants.values())) + variants_no_id

        # groups variants by contig; relative ordering of contigs doesn't matter
        return sorted(
            merged_variants,
            key=lambda v: (str(v.original_contig), str(v.original_start)))

    def get_sample_names():
        """Return the sample names for all variants."""
        # TODO: For now, ensures that every variant has the same samples. If
        # they didn't, we'd have to use the missing value for VCF records with
        # no data on a given sample. In and of itself, that isn't a problem
        # until the format field contains GT (genotype), which is required for
        # all samples if present in the format field.
        #
        # A couple of ways to handle this:
        #
        #   1) Ignore this requirement of VCF files. I'd assume this would
        #      still be compatible with varcode, but may not be with other
        #      VCF parsers.
        #   2) Remove the GT field from those records where this rule would
        #      be violated. This is sad because we lose information.
        #
        # It's also important to note that if/when we combine all samples,
        # we'll have to add a `.` (missing value) for those samples in which
        # a variant has no data. Check the VCF spec to make sure this is
        # valid; if not, we may have to write `.:.:.:.` -- one `.` for each
        # field in the format string.
        #
        # Moreover, we'll want to note whether we care about maintaining the
        # relative ordering of the sample names in the original VCF files.
        # This probably isn't important, but the code below does not do this
        # (it effectively alphabetizes the sample names because of the use of
        # `set`).
        sample_names = set()
        for variant in variants:
            sample_info = get_metadata_field('sample_info', variant, default={})
            addl_sample_names = set(sample_info.keys())

            # Ensures all variants have the same samples.
            if sample_names and sample_names != addl_sample_names:
                return []

            sample_names.update(addl_sample_names)

        return list(sample_names)

    headers = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']
    sample_names = get_sample_names()
    if sample_names:
        headers += ['FORMAT'] + sample_names

    unique_variants_list = merge_duplicate_variants()

    # usually we have just one reference genome for the whole variant collection
    # but if variants from multiple sources have been merged then we might
    # not be able to write out a VCF since the individual variants may be using
    # different coordinate systems
    genome_names = list({v.ensembl.reference_name for v in unique_variants_list})
    if len(genome_names) > 1:
        raise ValueError(
            "Cannot create VCF for variants with multiple reference genomes: %s" % (
                genome_names,))
    genome_name = genome_names[0]
    print('##fileformat=VCFv4.2', file=out)
    print('##reference=%s' % genome_name, file=out)
    print('\t'.join(headers), file=out)

    for variant in unique_variants_list:
        record = build_vcf_record(variant, add_sample_info=bool(sample_names))
        print('\t'.join(record), file=out)
Output a VCF file from a list of Variant records.

Parameters
----------
variants : iterable
    Variant objects

variant_to_metadata : dict
    Dictionary mapping each variant in `variants` to a dictionary
    of metadata.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/vcf_output.py#L21-L268
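A minimal usage sketch for the function above. The input path is a placeholder, and it assumes a `VariantCollection` loaded via varcode's `load_vcf` exposes its per-variant metadata as a `metadata` dict matching the `variant_to_metadata` argument:

from varcode import load_vcf
from varcode.vcf_output import variants_to_vcf

variants = load_vcf("input.vcf")  # hypothetical input file
with open("output.vcf", "w") as out:
    # variants.metadata is assumed to map each Variant to its metadata dict
    variants_to_vcf(variants, variants.metadata, out=out)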
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.filter_by_transcript_expression
def filter_by_transcript_expression(
        self,
        transcript_expression_dict,
        min_expression_value=0.0):
    """
    Filters effects to those which have an associated transcript whose
    expression value in the transcript_expression_dict argument is greater
    than min_expression_value.

    Parameters
    ----------
    transcript_expression_dict : dict
        Dictionary mapping Ensembl transcript IDs to expression estimates
        (either FPKM or TPM)

    min_expression_value : float
        Threshold above which we'll keep an effect in the result collection
    """
    return self.filter_above_threshold(
        key_fn=lambda effect: effect.transcript_id,
        value_dict=transcript_expression_dict,
        threshold=min_expression_value)
python
def filter_by_transcript_expression(
        self,
        transcript_expression_dict,
        min_expression_value=0.0):
    """
    Filters effects to those which have an associated transcript whose
    expression value in the transcript_expression_dict argument is greater
    than min_expression_value.

    Parameters
    ----------
    transcript_expression_dict : dict
        Dictionary mapping Ensembl transcript IDs to expression estimates
        (either FPKM or TPM)

    min_expression_value : float
        Threshold above which we'll keep an effect in the result collection
    """
    return self.filter_above_threshold(
        key_fn=lambda effect: effect.transcript_id,
        value_dict=transcript_expression_dict,
        threshold=min_expression_value)
Filters effects to those which have an associated transcript whose
expression value in the transcript_expression_dict argument is greater
than min_expression_value.

Parameters
----------
transcript_expression_dict : dict
    Dictionary mapping Ensembl transcript IDs to expression estimates
    (either FPKM or TPM)

min_expression_value : float
    Threshold above which we'll keep an effect in the result collection
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L105-L126
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.filter_by_gene_expression
def filter_by_gene_expression(
        self,
        gene_expression_dict,
        min_expression_value=0.0):
    """
    Filters effects to those which have an associated gene whose
    expression value in the gene_expression_dict argument is greater
    than min_expression_value.

    Parameters
    ----------
    gene_expression_dict : dict
        Dictionary mapping Ensembl gene IDs to expression estimates
        (either FPKM or TPM)

    min_expression_value : float
        Threshold above which we'll keep an effect in the result collection
    """
    return self.filter_above_threshold(
        key_fn=lambda effect: effect.gene_id,
        value_dict=gene_expression_dict,
        threshold=min_expression_value)
python
def filter_by_gene_expression(
        self,
        gene_expression_dict,
        min_expression_value=0.0):
    """
    Filters effects to those which have an associated gene whose
    expression value in the gene_expression_dict argument is greater
    than min_expression_value.

    Parameters
    ----------
    gene_expression_dict : dict
        Dictionary mapping Ensembl gene IDs to expression estimates
        (either FPKM or TPM)

    min_expression_value : float
        Threshold above which we'll keep an effect in the result collection
    """
    return self.filter_above_threshold(
        key_fn=lambda effect: effect.gene_id,
        value_dict=gene_expression_dict,
        threshold=min_expression_value)
Filters effects to those which have an associated gene whose
expression value in the gene_expression_dict argument is greater
than min_expression_value.

Parameters
----------
gene_expression_dict : dict
    Dictionary mapping Ensembl gene IDs to expression estimates
    (either FPKM or TPM)

min_expression_value : float
    Threshold above which we'll keep an effect in the result collection
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L128-L149
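A short sketch of the two expression filters above, assuming `effects` is an existing EffectCollection; the Ensembl IDs and TPM values here are invented for illustration:

# hypothetical expression estimates keyed by Ensembl IDs
tpm_by_transcript = {"ENST00000361390": 12.5, "ENST00000361453": 0.0}
tpm_by_gene = {"ENSG00000198888": 40.0}

# keep only effects on transcripts/genes expressed above 1.0 TPM
expressed_transcripts = effects.filter_by_transcript_expression(
    tpm_by_transcript, min_expression_value=1.0)
expressed_genes = effects.filter_by_gene_expression(
    tpm_by_gene, min_expression_value=1.0)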
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.filter_by_effect_priority
def filter_by_effect_priority(self, min_priority_class):
    """
    Create a new EffectCollection containing only effects whose priority
    is at or above the given class.
    """
    min_priority = transcript_effect_priority_dict[min_priority_class]
    return self.filter(
        lambda effect: effect_priority(effect) >= min_priority)
python
def filter_by_effect_priority(self, min_priority_class):
    """
    Create a new EffectCollection containing only effects whose priority
    is at or above the given class.
    """
    min_priority = transcript_effect_priority_dict[min_priority_class]
    return self.filter(
        lambda effect: effect_priority(effect) >= min_priority)
Create a new EffectCollection containing only effects whose priority
is at or above the given class.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L151-L158
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.detailed_string
def detailed_string(self):
    """
    Create a long string with all transcript effects for each mutation,
    grouped by gene (if a mutation affects multiple genes).
    """
    lines = []
    # TODO: annoying to always write `groupby_result.items()`,
    # consider making a GroupBy class which iterates over pairs
    # and also has common helper methods like `map_values`.
    for variant, variant_effects in self.groupby_variant().items():
        lines.append("\n%s" % variant)

        gene_effects_groups = variant_effects.groupby_gene_id()
        for (gene_id, gene_effects) in gene_effects_groups.items():
            if gene_id:
                gene_name = variant.ensembl.gene_name_of_gene_id(gene_id)
                lines.append(" Gene: %s (%s)" % (gene_name, gene_id))
            # place transcript effects with more significant impact
            # on top (e.g. FrameShift should go before NoncodingTranscript)
            for effect in sorted(
                    gene_effects,
                    key=effect_priority,
                    reverse=True):
                lines.append(" -- %s" % effect)

        # if we only printed one effect for this gene then
        # it's redundant to print it again as the highest priority effect
        if len(variant_effects) > 1:
            best = variant_effects.top_priority_effect()
            lines.append(" Highest Priority Effect: %s" % best)
    return "\n".join(lines)
python
def detailed_string(self):
    """
    Create a long string with all transcript effects for each mutation,
    grouped by gene (if a mutation affects multiple genes).
    """
    lines = []
    # TODO: annoying to always write `groupby_result.items()`,
    # consider making a GroupBy class which iterates over pairs
    # and also has common helper methods like `map_values`.
    for variant, variant_effects in self.groupby_variant().items():
        lines.append("\n%s" % variant)

        gene_effects_groups = variant_effects.groupby_gene_id()
        for (gene_id, gene_effects) in gene_effects_groups.items():
            if gene_id:
                gene_name = variant.ensembl.gene_name_of_gene_id(gene_id)
                lines.append(" Gene: %s (%s)" % (gene_name, gene_id))
            # place transcript effects with more significant impact
            # on top (e.g. FrameShift should go before NoncodingTranscript)
            for effect in sorted(
                    gene_effects,
                    key=effect_priority,
                    reverse=True):
                lines.append(" -- %s" % effect)

        # if we only printed one effect for this gene then
        # it's redundant to print it again as the highest priority effect
        if len(variant_effects) > 1:
            best = variant_effects.top_priority_effect()
            lines.append(" Highest Priority Effect: %s" % best)
    return "\n".join(lines)
Create a long string with all transcript effects for each mutation, grouped by gene (if a mutation affects multiple genes).
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L166-L196
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.top_priority_effect_per_variant
def top_priority_effect_per_variant(self):
    """Highest priority effect for each unique variant"""
    return OrderedDict(
        (variant, top_priority_effect(variant_effects))
        for (variant, variant_effects)
        in self.groupby_variant().items())
python
def top_priority_effect_per_variant(self):
    """Highest priority effect for each unique variant"""
    return OrderedDict(
        (variant, top_priority_effect(variant_effects))
        for (variant, variant_effects)
        in self.groupby_variant().items())
Highest priority effect for each unique variant
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L211-L216
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.top_priority_effect_per_transcript_id
def top_priority_effect_per_transcript_id(self):
    """Highest priority effect for each unique transcript ID"""
    return OrderedDict(
        (transcript_id, top_priority_effect(variant_effects))
        for (transcript_id, variant_effects)
        in self.groupby_transcript_id().items())
python
def top_priority_effect_per_transcript_id(self):
    """Highest priority effect for each unique transcript ID"""
    return OrderedDict(
        (transcript_id, top_priority_effect(variant_effects))
        for (transcript_id, variant_effects)
        in self.groupby_transcript_id().items())
Highest priority effect for each unique transcript ID
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L218-L223
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.top_priority_effect_per_gene_id
def top_priority_effect_per_gene_id(self):
    """Highest priority effect for each unique gene ID"""
    return OrderedDict(
        (gene_id, top_priority_effect(variant_effects))
        for (gene_id, variant_effects)
        in self.groupby_gene_id().items())
python
def top_priority_effect_per_gene_id(self):
    """Highest priority effect for each unique gene ID"""
    return OrderedDict(
        (gene_id, top_priority_effect(variant_effects))
        for (gene_id, variant_effects)
        in self.groupby_gene_id().items())
Highest priority effect for each unique gene ID
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L225-L230
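The three summaries above all return OrderedDicts, so they can be iterated directly; a sketch assuming `effects` is an EffectCollection:

for gene_id, effect in effects.top_priority_effect_per_gene_id().items():
    # effect is the single highest-priority effect observed for that gene
    print(gene_id, type(effect).__name__, effect.short_description)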
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.effect_expression
def effect_expression(self, expression_levels):
    """
    Parameters
    ----------
    expression_levels : dict
        Dictionary mapping transcript IDs to length-normalized expression
        levels (either FPKM or TPM)

    Returns dictionary mapping each transcript effect to an expression
    quantity. Effects that don't have an associated transcript
    (e.g. Intergenic) will not be included.
    """
    return OrderedDict(
        (effect, expression_levels.get(effect.transcript.id, 0.0))
        for effect in self
        if effect.transcript is not None)
python
def effect_expression(self, expression_levels):
    """
    Parameters
    ----------
    expression_levels : dict
        Dictionary mapping transcript IDs to length-normalized expression
        levels (either FPKM or TPM)

    Returns dictionary mapping each transcript effect to an expression
    quantity. Effects that don't have an associated transcript
    (e.g. Intergenic) will not be included.
    """
    return OrderedDict(
        (effect, expression_levels.get(effect.transcript.id, 0.0))
        for effect in self
        if effect.transcript is not None)
Parameters
----------
expression_levels : dict
    Dictionary mapping transcript IDs to length-normalized expression
    levels (either FPKM or TPM)

Returns dictionary mapping each transcript effect to an expression
quantity. Effects that don't have an associated transcript
(e.g. Intergenic) will not be included.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L232-L247
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.top_expression_effect
def top_expression_effect(self, expression_levels):
    """
    Return effect whose transcript has the highest expression level.
    If none of the effects are expressed or have associated transcripts,
    then return None. In case of ties, add lexicographical sorting by
    effect priority and transcript length.
    """
    effect_expression_dict = self.effect_expression(expression_levels)

    if len(effect_expression_dict) == 0:
        return None

    def key_fn(effect_fpkm_pair):
        """
        Sort effects primarily by their expression level and secondarily
        by the priority logic used in `top_priority_effect`.
        """
        (effect, fpkm) = effect_fpkm_pair
        return (fpkm, multi_gene_effect_sort_key(effect))

    return max(effect_expression_dict.items(), key=key_fn)[0]
python
def top_expression_effect(self, expression_levels):
    """
    Return effect whose transcript has the highest expression level.
    If none of the effects are expressed or have associated transcripts,
    then return None. In case of ties, add lexicographical sorting by
    effect priority and transcript length.
    """
    effect_expression_dict = self.effect_expression(expression_levels)

    if len(effect_expression_dict) == 0:
        return None

    def key_fn(effect_fpkm_pair):
        """
        Sort effects primarily by their expression level and secondarily
        by the priority logic used in `top_priority_effect`.
        """
        (effect, fpkm) = effect_fpkm_pair
        return (fpkm, multi_gene_effect_sort_key(effect))

    return max(effect_expression_dict.items(), key=key_fn)[0]
Return effect whose transcript has the highest expression level. If none of the effects are expressed or have associated transcripts, then return None. In case of ties, add lexicographical sorting by effect priority and transcript length.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L249-L270
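A sketch of choosing the most-expressed effect; the expression dict is keyed by transcript ID, as `effect_expression` expects, and the IDs and values here are invented:

expression_levels = {"ENST00000361390": 7.2, "ENST00000361453": 0.3}
best = effects.top_expression_effect(expression_levels)
if best is not None:
    print(best.transcript_id, best.short_description)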
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.to_dataframe
def to_dataframe(self):
    """Build a dataframe from the effect collection"""
    # list of properties to extract from Variant objects if they're
    # not None
    variant_properties = [
        "contig",
        "start",
        "ref",
        "alt",
        "is_snv",
        "is_transversion",
        "is_transition"
    ]

    def row_from_effect(effect):
        row = OrderedDict()
        row['variant'] = str(effect.variant.short_description)
        for field_name in variant_properties:
            # if effect.variant is None then this column value will be None
            row[field_name] = getattr(effect.variant, field_name, None)

        row['gene_id'] = effect.gene_id
        row['gene_name'] = effect.gene_name
        row['transcript_id'] = effect.transcript_id
        row['transcript_name'] = effect.transcript_name
        row['effect_type'] = effect.__class__.__name__
        row['effect'] = effect.short_description
        return row

    return pd.DataFrame.from_records(
        [row_from_effect(effect) for effect in self])
python
def to_dataframe(self):
    """Build a dataframe from the effect collection"""
    # list of properties to extract from Variant objects if they're
    # not None
    variant_properties = [
        "contig",
        "start",
        "ref",
        "alt",
        "is_snv",
        "is_transversion",
        "is_transition"
    ]

    def row_from_effect(effect):
        row = OrderedDict()
        row['variant'] = str(effect.variant.short_description)
        for field_name in variant_properties:
            # if effect.variant is None then this column value will be None
            row[field_name] = getattr(effect.variant, field_name, None)

        row['gene_id'] = effect.gene_id
        row['gene_name'] = effect.gene_name
        row['transcript_id'] = effect.transcript_id
        row['transcript_name'] = effect.transcript_name
        row['effect_type'] = effect.__class__.__name__
        row['effect'] = effect.short_description
        return row

    return pd.DataFrame.from_records(
        [row_from_effect(effect) for effect in self])
Build a dataframe from the effect collection
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L272-L301
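Since `to_dataframe` returns an ordinary pandas DataFrame, downstream summaries are one-liners; assuming `effects` is an EffectCollection:

df = effects.to_dataframe()
# count how many effects of each type were predicted
print(df.groupby("effect_type").size())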
openvax/varcode
varcode/util.py
random_variants
def random_variants(
        count,
        genome_name="GRCh38",
        deletions=True,
        insertions=True,
        random_seed=None):
    """
    Generate a VariantCollection with random variants that overlap
    at least one complete coding transcript.
    """
    rng = random.Random(random_seed)
    ensembl = genome_for_reference_name(genome_name)

    if ensembl in _transcript_ids_cache:
        transcript_ids = _transcript_ids_cache[ensembl]
    else:
        transcript_ids = ensembl.transcript_ids()
        _transcript_ids_cache[ensembl] = transcript_ids

    variants = []

    # we should finish way before this loop is over but just in case
    # something is wrong with PyEnsembl we want to avoid an infinite loop
    for _ in range(count * 100):
        if len(variants) < count:
            transcript_id = rng.choice(transcript_ids)
            transcript = ensembl.transcript_by_id(transcript_id)

            if not transcript.complete:
                continue

            exon = rng.choice(transcript.exons)
            base1_genomic_position = rng.randint(exon.start, exon.end)
            transcript_offset = transcript.spliced_offset(base1_genomic_position)
            seq = transcript.sequence

            ref = str(seq[transcript_offset])
            if transcript.on_backward_strand:
                ref = reverse_complement(ref)

            alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref]

            if insertions:
                nucleotide_pairs = [
                    x + y
                    for x in STANDARD_NUCLEOTIDES
                    for y in STANDARD_NUCLEOTIDES
                ]
                alt_nucleotides.extend(nucleotide_pairs)
            if deletions:
                alt_nucleotides.append("")
            alt = rng.choice(alt_nucleotides)
            variant = Variant(
                transcript.contig,
                base1_genomic_position,
                ref=ref,
                alt=alt,
                ensembl=ensembl)
            variants.append(variant)
        else:
            return VariantCollection(variants)

    raise ValueError(
        ("Unable to generate %d random variants, "
         "there may be a problem with PyEnsembl") % count)
python
def random_variants(
        count,
        genome_name="GRCh38",
        deletions=True,
        insertions=True,
        random_seed=None):
    """
    Generate a VariantCollection with random variants that overlap
    at least one complete coding transcript.
    """
    rng = random.Random(random_seed)
    ensembl = genome_for_reference_name(genome_name)

    if ensembl in _transcript_ids_cache:
        transcript_ids = _transcript_ids_cache[ensembl]
    else:
        transcript_ids = ensembl.transcript_ids()
        _transcript_ids_cache[ensembl] = transcript_ids

    variants = []

    # we should finish way before this loop is over but just in case
    # something is wrong with PyEnsembl we want to avoid an infinite loop
    for _ in range(count * 100):
        if len(variants) < count:
            transcript_id = rng.choice(transcript_ids)
            transcript = ensembl.transcript_by_id(transcript_id)

            if not transcript.complete:
                continue

            exon = rng.choice(transcript.exons)
            base1_genomic_position = rng.randint(exon.start, exon.end)
            transcript_offset = transcript.spliced_offset(base1_genomic_position)
            seq = transcript.sequence

            ref = str(seq[transcript_offset])
            if transcript.on_backward_strand:
                ref = reverse_complement(ref)

            alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref]

            if insertions:
                nucleotide_pairs = [
                    x + y
                    for x in STANDARD_NUCLEOTIDES
                    for y in STANDARD_NUCLEOTIDES
                ]
                alt_nucleotides.extend(nucleotide_pairs)
            if deletions:
                alt_nucleotides.append("")
            alt = rng.choice(alt_nucleotides)
            variant = Variant(
                transcript.contig,
                base1_genomic_position,
                ref=ref,
                alt=alt,
                ensembl=ensembl)
            variants.append(variant)
        else:
            return VariantCollection(variants)

    raise ValueError(
        ("Unable to generate %d random variants, "
         "there may be a problem with PyEnsembl") % count)
Generate a VariantCollection with random variants that overlap at least one complete coding transcript.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/util.py#L29-L92
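A reproducible sketch: passing a fixed `random_seed` makes the sampled variants deterministic across runs (the genome still has to be downloaded and indexed by PyEnsembl beforehand):

from varcode.util import random_variants

collection = random_variants(5, genome_name="GRCh37", random_seed=42)
for variant in collection:
    print(variant.contig, variant.start, variant.ref, variant.alt)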
skelsec/msldap
msldap/core/msldap.py
MSLDAP.get_server_info
def get_server_info(self, anonymous=True):
    """
    Performs bind on the server and grabs the DSA info object.
    If anonymous is set to true, then it will perform anonymous bind,
    not using user credentials. Otherwise it will use the credentials
    set in the object constructor.
    """
    if anonymous == True:
        logger.debug('Getting server info via Anonymous BIND on server %s' % self.target_server.get_host())
        server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
        conn = Connection(server, auto_bind=True)
        logger.debug('Got server info')
    else:
        logger.debug('Getting server info via credentials supplied on server %s' % self.target_server.get_host())
        server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
        if self.use_sspi == True:
            conn = self.monkeypatch()
        else:
            # bind against the freshly created `server` object so that its
            # DSA info gets populated before we return it
            conn = Connection(
                server,
                user=self.login_credential.get_msuser(),
                password=self.login_credential.get_password(),
                authentication=self.login_credential.get_authmethod())
        logger.debug('Performing BIND to server %s' % self.target_server.get_host())
        if not conn.bind():
            if 'description' in conn.result:
                raise Exception('Failed to bind to server! Reason: %s' % conn.result['description'])
            raise Exception('Failed to bind to server! Reason: %s' % conn.result)
        logger.debug('Connected to server!')
    return server.info
python
def get_server_info(self, anonymous=True):
    """
    Performs bind on the server and grabs the DSA info object.
    If anonymous is set to true, then it will perform anonymous bind,
    not using user credentials. Otherwise it will use the credentials
    set in the object constructor.
    """
    if anonymous == True:
        logger.debug('Getting server info via Anonymous BIND on server %s' % self.target_server.get_host())
        server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
        conn = Connection(server, auto_bind=True)
        logger.debug('Got server info')
    else:
        logger.debug('Getting server info via credentials supplied on server %s' % self.target_server.get_host())
        server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
        if self.use_sspi == True:
            conn = self.monkeypatch()
        else:
            # bind against the freshly created `server` object so that its
            # DSA info gets populated before we return it
            conn = Connection(
                server,
                user=self.login_credential.get_msuser(),
                password=self.login_credential.get_password(),
                authentication=self.login_credential.get_authmethod())
        logger.debug('Performing BIND to server %s' % self.target_server.get_host())
        if not conn.bind():
            if 'description' in conn.result:
                raise Exception('Failed to bind to server! Reason: %s' % conn.result['description'])
            raise Exception('Failed to bind to server! Reason: %s' % conn.result)
        logger.debug('Connected to server!')
    return server.info
Performs bind on the server and grabs the DSA info object.
If anonymous is set to true, then it will perform anonymous bind,
not using user credentials. Otherwise it will use the credentials
set in the object constructor.
https://github.com/skelsec/msldap/blob/bb873728afda9ca105d57d2740a28e319a78aa71/msldap/core/msldap.py#L94-L118
skelsec/msldap
msldap/core/msldap.py
MSLDAP.pagedsearch
def pagedsearch(self, ldap_filter, attributes):
    """
    Performs a paged search on the AD, using the filter and attributes
    as a normal query does. Needs to connect to the server first!

    ldap_filter: str : LDAP query filter
    attributes: list : Attributes list to receive in the result
    """
    logger.debug('Paged search, filter: %s attributes: %s' % (ldap_filter, ','.join(attributes)))
    ctr = 0
    entries = self._con.extend.standard.paged_search(
        self._tree,
        ldap_filter,
        attributes=attributes,
        paged_size=self.ldap_query_page_size)
    for entry in entries:
        if 'raw_attributes' in entry and 'attributes' in entry:
            # TODO: return ldapuser object
            ctr += 1
            if ctr % self.ldap_query_page_size == 0:
                logger.info('New page requested. Result count: %d' % ctr)
            yield entry
python
def pagedsearch(self, ldap_filter, attributes):
    """
    Performs a paged search on the AD, using the filter and attributes
    as a normal query does. Needs to connect to the server first!

    ldap_filter: str : LDAP query filter
    attributes: list : Attributes list to receive in the result
    """
    logger.debug('Paged search, filter: %s attributes: %s' % (ldap_filter, ','.join(attributes)))
    ctr = 0
    entries = self._con.extend.standard.paged_search(
        self._tree,
        ldap_filter,
        attributes=attributes,
        paged_size=self.ldap_query_page_size)
    for entry in entries:
        if 'raw_attributes' in entry and 'attributes' in entry:
            # TODO: return ldapuser object
            ctr += 1
            if ctr % self.ldap_query_page_size == 0:
                logger.info('New page requested. Result count: %d' % ctr)
            yield entry
Performs a paged search on the AD, using the filter and attributes
as a normal query does. Needs to connect to the server first!

ldap_filter: str : LDAP query filter
attributes: list : Attributes list to receive in the result
https://github.com/skelsec/msldap/blob/bb873728afda9ca105d57d2740a28e319a78aa71/msldap/core/msldap.py#L151-L167
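A hedged usage sketch for `pagedsearch`, assuming `ldap` is an already-connected MSLDAP instance; entries yielded by ldap3's paged search carry an 'attributes' dict:

ldap_filter = r'(objectClass=user)'
attributes = ['sAMAccountName', 'memberOf']
for entry in ldap.pagedsearch(ldap_filter, attributes):
    print(entry['attributes'].get('sAMAccountName'))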
skelsec/msldap
msldap/core/msldap.py
MSLDAP.get_all_user_objects
def get_all_user_objects(self):
    """
    Fetches all user objects from the AD, and returns MSADUser object
    """
    logger.debug('Polling AD for all user objects')
    ldap_filter = r'(objectClass=user)'
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
python
def get_all_user_objects(self):
    """
    Fetches all user objects from the AD, and returns MSADUser object
    """
    logger.debug('Polling AD for all user objects')
    ldap_filter = r'(objectClass=user)'
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
Fetches all user objects from the AD, and returns MSADUser object
https://github.com/skelsec/msldap/blob/bb873728afda9ca105d57d2740a28e319a78aa71/msldap/core/msldap.py#L171-L182
skelsec/msldap
msldap/core/msldap.py
MSLDAP.get_user
def get_user(self, sAMAccountName):
    """
    Fetches one user object from the AD, based on the sAMAccountName
    attribute (read: username)
    """
    logger.debug('Polling AD for user %s' % sAMAccountName)
    ldap_filter = r'(&(objectClass=user)(sAMAccountName=%s))' % sAMAccountName
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
python
def get_user(self, sAMAccountName):
    """
    Fetches one user object from the AD, based on the sAMAccountName
    attribute (read: username)
    """
    logger.debug('Polling AD for user %s' % sAMAccountName)
    ldap_filter = r'(&(objectClass=user)(sAMAccountName=%s))' % sAMAccountName
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
Fetches one user object from the AD, based on the sAMAccountName attribute (read: username)
https://github.com/skelsec/msldap/blob/bb873728afda9ca105d57d2740a28e319a78aa71/msldap/core/msldap.py#L184-L194
skelsec/msldap
msldap/core/msldap.py
MSLDAP.get_ad_info
def get_ad_info(self):
    """
    Polls for basic AD information (needed to determine password usage
    characteristics!)
    """
    logger.debug('Polling AD for basic info')
    ldap_filter = r'(distinguishedName=%s)' % self._tree
    attributes = MSADInfo.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        self._ldapinfo = MSADInfo.from_ldap(entry)
        return self._ldapinfo

    logger.debug('Poll finished!')
python
def get_ad_info(self):
    """
    Polls for basic AD information (needed to determine password usage
    characteristics!)
    """
    logger.debug('Polling AD for basic info')
    ldap_filter = r'(distinguishedName=%s)' % self._tree
    attributes = MSADInfo.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        self._ldapinfo = MSADInfo.from_ldap(entry)
        return self._ldapinfo

    logger.debug('Poll finished!')
Polls for basic AD information (needed to determine password usage
characteristics!)
https://github.com/skelsec/msldap/blob/bb873728afda9ca105d57d2740a28e319a78aa71/msldap/core/msldap.py#L196-L207
skelsec/msldap
msldap/core/msldap.py
MSLDAP.get_all_service_user_objects
def get_all_service_user_objects(self, include_machine=False):
    """
    Fetches all service user objects from the AD, and returns MSADUser object.
    Service user refers to a user with the SPN (servicePrincipalName)
    attribute set
    """
    logger.debug('Polling AD for all user objects, machine accounts included: %s' % include_machine)
    if include_machine == True:
        ldap_filter = r'(servicePrincipalName=*)'
    else:
        ldap_filter = r'(&(servicePrincipalName=*)(!(sAMAccountName = *$)))'
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
python
def get_all_service_user_objects(self, include_machine=False):
    """
    Fetches all service user objects from the AD, and returns MSADUser object.
    Service user refers to a user with the SPN (servicePrincipalName)
    attribute set
    """
    logger.debug('Polling AD for all user objects, machine accounts included: %s' % include_machine)
    if include_machine == True:
        ldap_filter = r'(servicePrincipalName=*)'
    else:
        ldap_filter = r'(&(servicePrincipalName=*)(!(sAMAccountName = *$)))'
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
Fetches all service user objects from the AD, and returns MSADUser object.
Service user refers to a user with the SPN (servicePrincipalName)
attribute set
https://github.com/skelsec/msldap/blob/bb873728afda9ca105d57d2740a28e319a78aa71/msldap/core/msldap.py#L209-L224
skelsec/msldap
msldap/core/msldap.py
MSLDAP.get_all_knoreq_user_objects
def get_all_knoreq_user_objects(self, include_machine=False):
    """
    Fetches all user objects with useraccountcontrol DONT_REQ_PREAUTH
    flag set from the AD, and returns MSADUser object.
    """
    logger.debug('Polling AD for all user objects, machine accounts included: %s' % include_machine)
    if include_machine == True:
        ldap_filter = r'(userAccountControl:1.2.840.113556.1.4.803:=4194304)'
    else:
        ldap_filter = r'(&(userAccountControl:1.2.840.113556.1.4.803:=4194304)(!(sAMAccountName = *$)))'
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
python
def get_all_knoreq_user_objects(self, include_machine=False):
    """
    Fetches all user objects with useraccountcontrol DONT_REQ_PREAUTH
    flag set from the AD, and returns MSADUser object.
    """
    logger.debug('Polling AD for all user objects, machine accounts included: %s' % include_machine)
    if include_machine == True:
        ldap_filter = r'(userAccountControl:1.2.840.113556.1.4.803:=4194304)'
    else:
        ldap_filter = r'(&(userAccountControl:1.2.840.113556.1.4.803:=4194304)(!(sAMAccountName = *$)))'
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
Fetches all user objects with useraccountcontrol DONT_REQ_PREAUTH flag set from the AD, and returns MSADUser object.
https://github.com/skelsec/msldap/blob/bb873728afda9ca105d57d2740a28e319a78aa71/msldap/core/msldap.py#L226-L241
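Both generators above yield MSADUser objects, so enumeration is a simple loop; a sketch assuming a connected MSLDAP instance and that MSADUser exposes a `sAMAccountName` attribute:

# users that do not require Kerberos preauthentication (AS-REP roastable)
for user in ldap.get_all_knoreq_user_objects():
    print(user.sAMAccountName)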
skelsec/msldap
msldap/ldap_objects/common.py
vn
def vn(x):
    """
    Value or None: returns None if x is an empty list, joins lists
    with '|', and converts datetimes to ISO format
    """
    if x == []:
        return None
    if isinstance(x, list):
        return '|'.join(x)
    if isinstance(x, datetime):
        return x.isoformat()
    return x
python
def vn(x):
    """
    Value or None: returns None if x is an empty list, joins lists
    with '|', and converts datetimes to ISO format
    """
    if x == []:
        return None
    if isinstance(x, list):
        return '|'.join(x)
    if isinstance(x, datetime):
        return x.isoformat()
    return x
Value or None: returns None if x is an empty list, joins lists
with '|', and converts datetimes to ISO format
https://github.com/skelsec/msldap/blob/bb873728afda9ca105d57d2740a28e319a78aa71/msldap/ldap_objects/common.py#L10-L20
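The behavior of `vn` on each value shape it handles, written as assertions:

from datetime import datetime

assert vn([]) is None
assert vn(['a', 'b']) == 'a|b'
assert vn(datetime(2019, 1, 1)) == '2019-01-01T00:00:00'
assert vn('plain') == 'plain'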
lebinh/aq
aq/engines.py
convert_tags_to_dict
def convert_tags_to_dict(item):
    """
    Convert AWS's inconvenient tags model of a list of
    {"Key": <key>, "Value": <value>} pairs to a dict of {<key>: <value>}
    for easier querying.

    This returns a proxied object over the given item to return a different
    tags format, as the tags attribute is read-only and we cannot modify it
    directly.
    """
    if hasattr(item, 'tags'):
        tags = item.tags
        if isinstance(tags, list):
            tags_dict = {}
            for kv_dict in tags:
                if isinstance(kv_dict, dict) and 'Key' in kv_dict and 'Value' in kv_dict:
                    tags_dict[kv_dict['Key']] = kv_dict['Value']
            return ObjectProxy(item, tags=tags_dict)
    return item
python
def convert_tags_to_dict(item):
    """
    Convert AWS's inconvenient tags model of a list of
    {"Key": <key>, "Value": <value>} pairs to a dict of {<key>: <value>}
    for easier querying.

    This returns a proxied object over the given item to return a different
    tags format, as the tags attribute is read-only and we cannot modify it
    directly.
    """
    if hasattr(item, 'tags'):
        tags = item.tags
        if isinstance(tags, list):
            tags_dict = {}
            for kv_dict in tags:
                if isinstance(kv_dict, dict) and 'Key' in kv_dict and 'Value' in kv_dict:
                    tags_dict[kv_dict['Key']] = kv_dict['Value']
            return ObjectProxy(item, tags=tags_dict)
    return item
Convert AWS's inconvenient tags model of a list of
{"Key": <key>, "Value": <value>} pairs to a dict of {<key>: <value>}
for easier querying.

This returns a proxied object over the given item to return a different
tags format, as the tags attribute is read-only and we cannot modify it
directly.
https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/engines.py#L151-L167
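A minimal illustration using a stand-in object, since real boto3 resources have a read-only `tags` attribute; `FakeInstance` is purely hypothetical, and the assertion assumes ObjectProxy forwards the overridden `tags` as given:

class FakeInstance(object):
    tags = [{'Key': 'Name', 'Value': 'web-1'}, {'Key': 'env', 'Value': 'prod'}]

proxied = convert_tags_to_dict(FakeInstance())
assert proxied.tags == {'Name': 'web-1', 'env': 'prod'}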
lebinh/aq
aq/engines.py
BotoSqliteEngine.load_tables
def load_tables(self, query, meta):
    """
    Load necessary resources tables into db to execute given query.
    """
    try:
        for table in meta.tables:
            self.load_table(table)
    except NoCredentialsError:
        help_link = 'http://boto3.readthedocs.io/en/latest/guide/configuration.html'
        raise QueryError('Unable to locate AWS credential. '
                         'Please see {0} on how to configure AWS credential.'.format(help_link))
python
def load_tables(self, query, meta):
    """
    Load necessary resources tables into db to execute given query.
    """
    try:
        for table in meta.tables:
            self.load_table(table)
    except NoCredentialsError:
        help_link = 'http://boto3.readthedocs.io/en/latest/guide/configuration.html'
        raise QueryError('Unable to locate AWS credential. '
                         'Please see {0} on how to configure AWS credential.'.format(help_link))
Load necessary resources tables into db to execute given query.
https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/engines.py#L63-L73
lebinh/aq
aq/engines.py
BotoSqliteEngine.load_table
def load_table(self, table):
    """
    Load resources as specified by given table into our db.
    """
    region = table.database if table.database else self.default_region
    resource_name, collection_name = table.table.split('_', 1)
    # we use underscore "_" instead of dash "-" for region name but boto3 needs dash
    boto_region_name = region.replace('_', '-')
    resource = self.boto3_session.resource(resource_name, region_name=boto_region_name)
    if not hasattr(resource, collection_name):
        raise QueryError(
            'Unknown collection <{0}> of resource <{1}>'.format(collection_name, resource_name))
    self.attach_region(region)
    self.refresh_table(region, table.table, resource, getattr(resource, collection_name))
python
def load_table(self, table):
    """
    Load resources as specified by given table into our db.
    """
    region = table.database if table.database else self.default_region
    resource_name, collection_name = table.table.split('_', 1)
    # we use underscore "_" instead of dash "-" for region name but boto3 needs dash
    boto_region_name = region.replace('_', '-')
    resource = self.boto3_session.resource(resource_name, region_name=boto_region_name)
    if not hasattr(resource, collection_name):
        raise QueryError(
            'Unknown collection <{0}> of resource <{1}>'.format(collection_name, resource_name))
    self.attach_region(region)
    self.refresh_table(region, table.table, resource, getattr(resource, collection_name))
Load resources as specified by given table into our db.
https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/engines.py#L75-L89
lebinh/aq
aq/sqlite_util.py
json_serialize
def json_serialize(obj):
    """
    Simple generic JSON serializer for common objects.
    """
    if isinstance(obj, datetime):
        return obj.isoformat()

    if hasattr(obj, 'id'):
        return jsonify(obj.id)

    if hasattr(obj, 'name'):
        return jsonify(obj.name)

    raise TypeError('{0} is not JSON serializable'.format(obj))
python
def json_serialize(obj):
    """
    Simple generic JSON serializer for common objects.
    """
    if isinstance(obj, datetime):
        return obj.isoformat()

    if hasattr(obj, 'id'):
        return jsonify(obj.id)

    if hasattr(obj, 'name'):
        return jsonify(obj.name)

    raise TypeError('{0} is not JSON serializable'.format(obj))
Simple generic JSON serializer for common objects.
https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/sqlite_util.py#L20-L33
lebinh/aq
aq/sqlite_util.py
json_get
def json_get(serialized_object, field):
    """
    This emulates the HSTORE `->` get value operation. It gets the value
    from a JSON serialized column by the given key and returns `null` if
    not present. Key can be either an integer for array index access or
    a string for object field access.

    :return: JSON serialized value of key in object
    """
    # return null if serialized_object is null or "serialized null"
    if serialized_object is None:
        return None
    obj = json.loads(serialized_object)
    if obj is None:
        return None

    if isinstance(field, int):
        # array index access
        res = obj[field] if 0 <= field < len(obj) else None
    else:
        # object field access
        res = obj.get(field)

    if not isinstance(res, (int, float, string_types)):
        res = json.dumps(res)
    return res
python
def json_get(serialized_object, field):
    """
    This emulates the HSTORE `->` get value operation. It gets the value
    from a JSON serialized column by the given key and returns `null` if
    not present. Key can be either an integer for array index access or
    a string for object field access.

    :return: JSON serialized value of key in object
    """
    # return null if serialized_object is null or "serialized null"
    if serialized_object is None:
        return None
    obj = json.loads(serialized_object)
    if obj is None:
        return None

    if isinstance(field, int):
        # array index access
        res = obj[field] if 0 <= field < len(obj) else None
    else:
        # object field access
        res = obj.get(field)

    if not isinstance(res, (int, float, string_types)):
        res = json.dumps(res)
    return res
This emulates the HSTORE `->` get value operation. It gets the value
from a JSON serialized column by the given key and returns `null` if
not present. Key can be either an integer for array index access or
a string for object field access.

:return: JSON serialized value of key in object
https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/sqlite_util.py#L36-L61
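Note that `json_get` works on *serialized* JSON, so nested lookups chain on strings and a missing key comes back as serialized null (the string 'null'), per the docstring:

import json

doc = json.dumps({'name': 'web-1', 'ports': [80, 443]})
assert json_get(doc, 'name') == 'web-1'
assert json_get(doc, 'missing') == 'null'          # serialized null
assert json_get(json_get(doc, 'ports'), 0) == 80   # chained array access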
lebinh/aq
aq/sqlite_util.py
create_table
def create_table(db, schema_name, table_name, columns):
    """
    Create a table, schema_name.table_name, in given database with given
    list of column names.
    """
    table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
    db.execute('DROP TABLE IF EXISTS {0}'.format(table))
    columns_list = ', '.join(columns)
    db.execute('CREATE TABLE {0} ({1})'.format(table, columns_list))
python
def create_table(db, schema_name, table_name, columns):
    """
    Create a table, schema_name.table_name, in given database with given
    list of column names.
    """
    table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
    db.execute('DROP TABLE IF EXISTS {0}'.format(table))
    columns_list = ', '.join(columns)
    db.execute('CREATE TABLE {0} ({1})'.format(table, columns_list))
Create a table, schema_name.table_name, in given database with given list of column names.
https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/sqlite_util.py#L64-L71
lebinh/aq
aq/sqlite_util.py
insert_all
def insert_all(db, schema_name, table_name, columns, items):
    """
    Insert all items in the given items list into the specified table,
    schema_name.table_name.
    """
    table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
    columns_list = ', '.join(columns)
    values_list = ', '.join(['?'] * len(columns))
    query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(
        table=table, columns=columns_list, values=values_list)
    for item in items:
        values = [getattr(item, col) for col in columns]
        db.execute(query, values)
python
def insert_all(db, schema_name, table_name, columns, items):
    """
    Insert all items in the given items list into the specified table,
    schema_name.table_name.
    """
    table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
    columns_list = ', '.join(columns)
    values_list = ', '.join(['?'] * len(columns))
    query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(
        table=table, columns=columns_list, values=values_list)
    for item in items:
        values = [getattr(item, col) for col in columns]
        db.execute(query, values)
Insert all items in the given items list into the specified table,
schema_name.table_name.
https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/sqlite_util.py#L74-L85
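The two helpers compose naturally; a self-contained sketch with an in-memory sqlite3 database and a namedtuple standing in for a boto3 resource item:

import sqlite3
from collections import namedtuple

Item = namedtuple('Item', ['id', 'state'])

db = sqlite3.connect(':memory:')
create_table(db, None, 'instances', ['id', 'state'])
insert_all(db, None, 'instances', ['id', 'state'],
           [Item('i-1', 'running'), Item('i-2', 'stopped')])
print(db.execute('SELECT * FROM instances').fetchall())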
gpennington/PyMarvel
marvel/creator.py
Creator.get_comics
def get_comics(self, *args, **kwargs):
    """
    Returns a full ComicDataWrapper object for this creator.

    /creators/{creatorId}/comics

    :returns:  ComicDataWrapper -- A new request to API. Contains full results set.
    """
    from .comic import Comic, ComicDataWrapper
    return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
python
def get_comics(self, *args, **kwargs):
    """
    Returns a full ComicDataWrapper object for this creator.

    /creators/{creatorId}/comics

    :returns:  ComicDataWrapper -- A new request to API. Contains full results set.
    """
    from .comic import Comic, ComicDataWrapper
    return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
Returns a full ComicDataWrapper object for this creator.

/creators/{creatorId}/comics

:returns:  ComicDataWrapper -- A new request to API. Contains full results set.
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/creator.py#L140-L149
gpennington/PyMarvel
marvel/creator.py
Creator.get_events
def get_events(self, *args, **kwargs):
    """
    Returns a full EventDataWrapper object for this creator.

    /creators/{creatorId}/events

    :returns:  EventDataWrapper -- A new request to API. Contains full results set.
    """
    from .event import Event, EventDataWrapper
    return self.get_related_resource(Event, EventDataWrapper, args, kwargs)
python
Returns a full EventDataWrapper object for this creator.

/creators/{creatorId}/events

:returns: EventDataWrapper -- A new request to the API. Contains the full results set.
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/creator.py#L151-L160
gpennington/PyMarvel
marvel/creator.py
Creator.get_series
def get_series(self, *args, **kwargs):
    """
    Returns a full SeriesDataWrapper object for this creator.

    /creators/{creatorId}/series

    :returns: SeriesDataWrapper -- A new request to the API. Contains the full results set.
    """
    from .series import Series, SeriesDataWrapper
    return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs)
python
Returns a full SeriesDataWrapper object for this creator.

/creators/{creatorId}/series

:returns: SeriesDataWrapper -- A new request to the API. Contains the full results set.
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/creator.py#L162-L171
gpennington/PyMarvel
marvel/creator.py
Creator.get_stories
def get_stories(self, *args, **kwargs):
    """
    Returns a full StoryDataWrapper object for this creator.

    /creators/{creatorId}/stories

    :returns: StoryDataWrapper -- A new request to the API. Contains the full results set.
    """
    from .story import Story, StoryDataWrapper
    return self.get_related_resource(Story, StoryDataWrapper, args, kwargs)
python
Returns a full StoryDataWrapper object for this creator.

/creators/{creatorId}/stories

:returns: StoryDataWrapper -- A new request to the API. Contains the full results set.
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/creator.py#L173-L182
gpennington/PyMarvel
marvel/core.py
MarvelObject.list_to_instance_list
def list_to_instance_list(_self, _list, _Class):
    """
    Takes a list of resource dicts and returns a list
    of resource instances, defined by the _Class param.

    :param _self: Original resource calling the method
    :type _self: core.MarvelObject
    :param _list: List of dicts describing a Resource.
    :type _list: list
    :param _Class: The Resource class to create a list of (Comic, Creator, etc).
    :type _Class: core.MarvelObject

    :returns: list -- List of Resource instances (Comic, Creator, etc).
    """
    items = []
    for item in _list:
        items.append(_Class(_self.marvel, item))
    return items
python
Takes a list of resource dicts and returns a list
of resource instances, defined by the _Class param.

:param _self: Original resource calling the method
:type _self: core.MarvelObject
:param _list: List of dicts describing a Resource.
:type _list: list
:param _Class: The Resource class to create a list of (Comic, Creator, etc).
:type _Class: core.MarvelObject

:returns: list -- List of Resource instances (Comic, Creator, etc).
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/core.py#L39-L56
gpennington/PyMarvel
marvel/core.py
MarvelObject.get_related_resource
def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):
    """
    Takes a related resource Class and returns the related resource DataWrapper.
    For example: given a Character instance, return a ComicDataWrapper related
    to that character.

    /characters/{characterId}/comics

    :param _Class: The Resource class to retrieve
    :type _Class: core.MarvelObject
    :param _ClassDataWrapper: The Resource response object
    :type _ClassDataWrapper: core.MarvelObject
    :param kwargs: dict of query params for the API
    :type kwargs: dict

    :returns: DataWrapper -- DataWrapper for requested Resource
    """
    url = "%s/%s/%s" % (_self.resource_url(), _self.id, _Class.resource_url())
    response = json.loads(_self.marvel._call(url, _self.marvel._params(kwargs)).text)
    return _ClassDataWrapper(_self.marvel, response)
python
Takes a related resource Class and returns the related resource DataWrapper.
For example: given a Character instance, return a ComicDataWrapper related
to that character.

/characters/{characterId}/comics

:param _Class: The Resource class to retrieve
:type _Class: core.MarvelObject
:param _ClassDataWrapper: The Resource response object
:type _ClassDataWrapper: core.MarvelObject
:param kwargs: dict of query params for the API
:type kwargs: dict

:returns: DataWrapper -- DataWrapper for requested Resource
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/core.py#L58-L77
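A hedged usage sketch of this plumbing: Creator.get_comics (above) delegates here, and the helper joins resource_url(), self.id, and the target class's resource_url() into a relative URL such as creators/30/comics. The keys are assumed valid, as in the doctests later in this file:

m = Marvel(public_key, private_key)
creator = m.get_creator(30).data.result   # Stan Lee, per the get_creator doctest below
comics = creator.get_comics()             # GET creators/30/comics, wrapped in ComicDataWrapper
for comic in comics.data.results:
    print(comic.name)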
gpennington/PyMarvel
marvel/character.py
CharacterDataWrapper.next
def next(self):
    """
    Returns new CharacterDataWrapper
    TODO: Don't raise offset past count - limit
    """
    self.params['offset'] = str(int(self.params['offset']) + int(self.params['limit']))
    return self.marvel.get_characters(self.marvel, (), **self.params)
python
Returns new CharacterDataWrapper
TODO: Don't raise offset past count - limit
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/character.py#L15-L21
gpennington/PyMarvel
marvel/character.py
CharacterDataWrapper.previous
def previous(self):
    """
    Returns new CharacterDataWrapper
    TODO: Don't lower offset below 0
    """
    self.params['offset'] = str(int(self.params['offset']) - int(self.params['limit']))
    return self.marvel.get_characters(self.marvel, (), **self.params)
python
Returns new CharacterDataWrapper
TODO: Don't lower offset below 0
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/character.py#L23-L29
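A hedged sketch of the bounds check the two TODOs above ask for, assuming the wrapper keeps the params dict it was constructed with and that data.total mirrors the Marvel API response envelope:

m = Marvel(public_key, private_key)
cdw = m.get_characters(limit="20", offset="0")
while int(cdw.params['offset']) + int(cdw.params['limit']) < cdw.data.total:
    cdw = cdw.next()          # stops before paging past the last result
    for result in cdw.data.results:
        print(result.name)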
gpennington/PyMarvel
marvel/marvel.py
Marvel._call
def _call(self, resource_url, params=None):
    """
    Calls the Marvel API endpoint

    :param resource_url: url slug of the resource
    :type resource_url: str
    :param params: query params to add to endpoint
    :type params: str

    :returns: response -- Requests response
    """
    url = "%s%s" % (self._endpoint(), resource_url)
    if params:
        url += "?%s&%s" % (params, self._auth())
    else:
        url += "?%s" % self._auth()
    return requests.get(url)
python
Calls the Marvel API endpoint

:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str

:returns: response -- Requests response
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L38-L55
gpennington/PyMarvel
marvel/marvel.py
Marvel._auth
def _auth(self):
    """
    Creates hash from api keys and returns all required parameters

    :returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
    """
    ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
    hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
    return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
python
Creates hash from api keys and returns all required parameters

:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L69-L77
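The hash follows the scheme the method encodes: md5 of timestamp + private key + public key. The method above is Python 2 style (it feeds a str to hashlib.md5); a Python 3 port has to encode first. A sketch with illustrative keys:

import hashlib

ts = "2014-01-01T12:00:00"          # any value works, but it must match the "ts" param
private_key, public_key = "my_private", "my_public"
digest = hashlib.md5((ts + private_key + public_key).encode("utf-8")).hexdigest()
query = "ts=%s&apikey=%s&hash=%s" % (ts, public_key, digest)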
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_character
def get_character(self, id):
    """Fetches a single character by id.

    get /v1/public/characters/{characterId}

    :param id: ID of Character
    :type id: int

    :returns: CharacterDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_character(1009718)
    >>> print cdw.data.count
    1
    >>> print cdw.data.results[0].name
    Wolverine
    """
    url = "%s/%s" % (Character.resource_url(), id)
    response = json.loads(self._call(url).text)
    return CharacterDataWrapper(self, response)
python
Fetches a single character by id.

get /v1/public/characters/{characterId}

:param id: ID of Character
:type id: int

:returns: CharacterDataWrapper

>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L84-L104
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_characters
def get_characters(self, *args, **kwargs):
    """Fetches lists of comic characters with optional filters.

    get /v1/public/characters

    :returns: CharacterDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
    >>> print cdw.data.count
    1401
    >>> for result in cdw.data.results:
    ...     print result.name
    Aginar
    Air-Walker (Gabriel Lan)
    Ajak
    Ajaxis
    Akemi
    """
    # pass url string and params string to _call
    response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
    return CharacterDataWrapper(self, response, kwargs)
python
Fetches lists of comic characters with optional filters.

get /v1/public/characters

:returns: CharacterDataWrapper

>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
...     print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L106-L128
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_comic
def get_comic(self, id):
    """Fetches a single comic by id.

    get /v1/public/comics/{comicId}

    :param id: ID of Comic
    :type id: int

    :returns: ComicDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_comic(1009718)
    >>> print cdw.data.count
    1
    >>> print cdw.data.result.name
    Some Comic
    """
    url = "%s/%s" % (Comic.resource_url(), id)
    response = json.loads(self._call(url).text)
    return ComicDataWrapper(self, response)
python
Fetches a single comic by id.

get /v1/public/comics/{comicId}

:param id: ID of Comic
:type id: int

:returns: ComicDataWrapper

>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L130-L150
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_comics
def get_comics(self, *args, **kwargs):
    """
    Fetches list of comics.

    get /v1/public/comics

    :returns: ComicDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
    >>> print cdw.data.count
    10
    >>> print cdw.data.results[0].name
    Some Comic
    """
    response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
    return ComicDataWrapper(self, response)
python
Fetches list of comics.

get /v1/public/comics

:returns: ComicDataWrapper

>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L152-L170
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_creator
def get_creator(self, id):
    """Fetches a single creator by id.

    get /v1/public/creators/{creatorId}

    :param id: ID of Creator
    :type id: int

    :returns: CreatorDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_creator(30)
    >>> print cdw.data.count
    1
    >>> print cdw.data.result.fullName
    Stan Lee
    """
    url = "%s/%s" % (Creator.resource_url(), id)
    response = json.loads(self._call(url).text)
    return CreatorDataWrapper(self, response)
python
Fetches a single creator by id.

get /v1/public/creators/{creatorId}

:param id: ID of Creator
:type id: int

:returns: CreatorDataWrapper

>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L173-L193
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_creators
def get_creators(self, *args, **kwargs):
    """Fetches lists of creators.

    get /v1/public/creators

    :returns: CreatorDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
    >>> print cdw.data.total
    25
    >>> print cdw.data.results[0].fullName
    Alvin Lee
    """
    response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
    return CreatorDataWrapper(self, response)
python
Fetches lists of creators.

get /v1/public/creators

:returns: CreatorDataWrapper

>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L196-L212
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_event
def get_event(self, id):
    """Fetches a single event by id.

    get /v1/public/events/{eventId}

    :param id: ID of Event
    :type id: int

    :returns: EventDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_event(253)
    >>> print response.data.result.title
    Infinity Gauntlet
    """
    url = "%s/%s" % (Event.resource_url(), id)
    response = json.loads(self._call(url).text)
    return EventDataWrapper(self, response)
python
Fetches a single event by id.

get /v1/public/events/{eventId}

:param id: ID of Event
:type id: int

:returns: EventDataWrapper

>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L215-L233
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_events
def get_events(self, *args, **kwargs):
    """Fetches lists of events.

    get /v1/public/events

    :returns: EventDataWrapper

    >>> #Find all the events that involved both Hulk and Wolverine
    >>> #hulk's id: 1009351
    >>> #wolverine's id: 1009718
    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_events(characters="1009351,1009718")
    >>> print response.data.total
    38
    >>> events = response.data.results
    >>> print events[1].title
    Age of Apocalypse
    """
    response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
    return EventDataWrapper(self, response)
python
Fetches lists of events.

get /v1/public/events

:returns: EventDataWrapper

>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L236-L256
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_single_series
def get_single_series(self, id):
    """Fetches a single comic series by id.

    get /v1/public/series/{seriesId}

    :param id: ID of Series
    :type id: int

    :returns: SeriesDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_single_series(12429)
    >>> print response.data.result.title
    5 Ronin (2010)
    """
    url = "%s/%s" % (Series.resource_url(), id)
    response = json.loads(self._call(url).text)
    return SeriesDataWrapper(self, response)
python
Fetches a single comic series by id.

get /v1/public/series/{seriesId}

:param id: ID of Series
:type id: int

:returns: SeriesDataWrapper

>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L259-L277
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_series
def get_series(self, *args, **kwargs):
    """Fetches lists of series.

    get /v1/public/series

    :returns: SeriesDataWrapper

    >>> #Find all the series that involved Wolverine
    >>> #wolverine's id: 1009718
    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_series(characters="1009718")
    >>> print response.data.total
    435
    >>> series = response.data.results
    >>> print series[0].title
    5 Ronin (2010)
    """
    response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
    return SeriesDataWrapper(self, response)
python
Fetches lists of series.

get /v1/public/series

:returns: SeriesDataWrapper

>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L280-L299
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_story
def get_story(self, id):
    """Fetches a single story by id.

    get /v1/public/stories/{storyId}

    :param id: ID of Story
    :type id: int

    :returns: StoryDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_story(29)
    >>> print response.data.result.title
    Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
    """
    url = "%s/%s" % (Story.resource_url(), id)
    response = json.loads(self._call(url).text)
    return StoryDataWrapper(self, response)
python
Fetches a single story by id.

get /v1/public/stories/{storyId}

:param id: ID of Story
:type id: int

:returns: StoryDataWrapper

>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L301-L319
gpennington/PyMarvel
marvel/marvel.py
Marvel.get_stories
def get_stories(self, *args, **kwargs):
    """Fetches lists of stories.

    get /v1/public/stories

    :returns: StoryDataWrapper

    >>> #Find all the stories that involved both Hulk and Wolverine
    >>> #hulk's id: 1009351
    >>> #wolverine's id: 1009718
    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_stories(characters="1009351,1009718")
    >>> print response.data.total
    4066
    >>> stories = response.data.results
    >>> print stories[1].title
    Cover #477
    """
    response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
    return StoryDataWrapper(self, response)
python
Fetches lists of stories.

get /v1/public/stories

:returns: StoryDataWrapper

>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L322-L342
gpennington/PyMarvel
marvel/story.py
Story.get_creators
def get_creators(self, *args, **kwargs):
    """
    Returns a full CreatorDataWrapper object for this story.

    /stories/{storyId}/creators

    :returns: CreatorDataWrapper -- A new request to the API. Contains the full results set.
    """
    from .creator import Creator, CreatorDataWrapper
    return self.get_related_resource(Creator, CreatorDataWrapper, args, kwargs)
python
Returns a full CreatorDataWrapper object for this story.

/stories/{storyId}/creators

:returns: CreatorDataWrapper -- A new request to the API. Contains the full results set.
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/story.py#L91-L100
gpennington/PyMarvel
marvel/story.py
Story.get_characters
def get_characters(self, *args, **kwargs):
    """
    Returns a full CharacterDataWrapper object for this story.

    /stories/{storyId}/characters

    :returns: CharacterDataWrapper -- A new request to the API. Contains the full results set.
    """
    from .character import Character, CharacterDataWrapper
    return self.get_related_resource(Character, CharacterDataWrapper, args, kwargs)
python
Returns a full CharacterDataWrapper object for this story.

/stories/{storyId}/characters

:returns: CharacterDataWrapper -- A new request to the API. Contains the full results set.
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/story.py#L102-L111
faroit/stempeg
stempeg/__init__.py
ffmpeg_version
def ffmpeg_version():
    """Returns the available ffmpeg version

    Returns
    -------
    version : str
        version number as string
    """
    cmd = [
        'ffmpeg',
        '-version'
    ]

    output = sp.check_output(cmd)
    aac_codecs = [
        x for x in
        output.splitlines() if "ffmpeg version " in str(x)
    ][0]
    hay = aac_codecs.decode('ascii')
    match = re.findall(r'ffmpeg version (\d+\.)?(\d+\.)?(\*|\d+)', hay)
    if match:
        return "".join(match[0])
    else:
        return None
python
Returns the available ffmpeg version

Returns
-------
version : str
    version number as string
https://github.com/faroit/stempeg/blob/ebbaec87ea440fcbb06423d708e7847749e63d38/stempeg/__init__.py#L32-L56
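A hedged usage sketch: since write_stems (below) warns for ffmpeg < 3, the parsed version string can serve as a guard. Assumes ffmpeg is on PATH:

import stempeg

version = stempeg.ffmpeg_version()   # e.g. "3.4.2", or None if unparsable
if version is None or int(version.split(".")[0]) < 3:
    print("ffmpeg >= 3 is recommended for writing stems, found: %s" % version)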
faroit/stempeg
stempeg/__init__.py
cli
def cli(inargs=None):
    """
    Commandline interface for receiving stem files
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--version', '-V',
        action='version',
        version='%%(prog)s %s' % __version__
    )

    parser.add_argument(
        'filename',
        metavar="filename",
        help="Input STEM file"
    )

    parser.add_argument(
        '--id',
        metavar='id',
        type=int,
        nargs='+',
        help="A list of stem_ids"
    )

    parser.add_argument(
        '-s',
        type=float,
        nargs='?',
        help="start offset in seconds"
    )

    parser.add_argument(
        '-t',
        type=float,
        nargs='?',
        help="read duration"
    )

    parser.add_argument(
        'outdir',
        metavar='outdir',
        nargs='?',
        help="Output folder"
    )

    args = parser.parse_args(inargs)
    stem2wav(args.filename, args.outdir, args.id, args.s, args.t)
python
Commandline interface for receiving stem files
https://github.com/faroit/stempeg/blob/ebbaec87ea440fcbb06423d708e7847749e63d38/stempeg/__init__.py#L59-L108
faroit/stempeg
stempeg/write.py
check_available_aac_encoders
def check_available_aac_encoders():
    """Returns the available AAC encoders

    Returns
    -------
    codecs : list(str)
        List of available encoder codecs
    """
    cmd = [
        'ffmpeg',
        '-v', 'error',
        '-codecs'
    ]

    output = sp.check_output(cmd)
    aac_codecs = [
        x for x in
        output.splitlines() if "AAC (Advanced Audio Coding)" in str(x)
    ][0]
    hay = aac_codecs.decode('ascii')
    match = re.findall(r'\(encoders: ([^\)]*) \)', hay)
    if match:
        return match[0].split(" ")
    else:
        return None
python
Returns the available AAC encoders

Returns
-------
codecs : list(str)
    List of available encoder codecs
https://github.com/faroit/stempeg/blob/ebbaec87ea440fcbb06423d708e7847749e63d38/stempeg/write.py#L10-L35
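This probe backs the codec fallback in write_stems below; the selection idiom, sketched on its own:

encoders = check_available_aac_encoders()            # None if probing failed
codec = 'libfdk_aac' if encoders and 'libfdk_aac' in encoders else 'aac'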
faroit/stempeg
stempeg/write.py
write_stems
def write_stems(
    audio,
    filename,
    rate=44100,
    bitrate=256000,
    codec=None,
    ffmpeg_params=None
):
    """Write stems from numpy Tensor

    Parameters
    ----------
    audio : array_like
        The tensor (or matrix) of stems. The data shape is formatted as
        :code:`stems x channels x samples`.
    filename : str
        Output filename of the stems file
    rate : int
        Output samplerate. Defaults to 44100 Hz.
    bitrate : int
        AAC Bitrate in Bits per second. Defaults to 256 Kbit/s
    codec : str
        AAC codec used. Defaults to `None` which automatically selects
        either `libfdk_aac` or `aac` in that order, determined by availability.
    ffmpeg_params : list(str)
        List of additional ffmpeg parameters

    Notes
    -----
    Output is written as 16bit/44.1 kHz
    """
    if int(stempeg.ffmpeg_version()[0]) < 3:
        warnings.warn(
            "Writing STEMS with FFMPEG version < 3 is unsupported",
            UserWarning
        )

    if codec is None:
        avail = check_available_aac_encoders()

        if avail is not None:
            if 'libfdk_aac' in avail:
                codec = 'libfdk_aac'
            else:
                codec = 'aac'
                warnings.warn("For better quality, please install libfdk_aac")
        else:
            codec = 'aac'
            warnings.warn("For better quality, please install libfdk_aac")

    tmps = [
        tmp.NamedTemporaryFile(delete=False, suffix='.wav')
        for t in range(audio.shape[0])
    ]

    if audio.shape[1] % 1024 != 0:
        warnings.warn(
            "Number of samples does not divide by 1024, be aware that "
            "the AAC encoder adds silence to the input signal"
        )

    for k in range(audio.shape[0]):
        sf.write(tmps[k].name, audio[k], rate)

    cmd = (
        [
            'ffmpeg', '-y',
            "-f", 's%dle' % (16),
            "-acodec", 'pcm_s%dle' % (16),
            '-ar', "%d" % rate,
            '-ac', "%d" % 2
        ] +
        list(chain.from_iterable(
            [['-i', i.name] for i in tmps]
        )) +
        list(chain.from_iterable(
            [['-map', str(k)] for k, _ in enumerate(tmps)]
        )) +
        [
            '-vn',
            '-acodec', codec,
            '-ar', "%d" % rate,
            '-strict', '-2',
            '-loglevel', 'error'
        ] +
        (['-ab', str(bitrate)] if (bitrate is not None) else []) +
        (ffmpeg_params if ffmpeg_params else []) +
        [filename]
    )
    sp.call(cmd)
python
Write stems from numpy Tensor

Parameters
----------
audio : array_like
    The tensor (or matrix) of stems. The data shape is formatted as
    :code:`stems x channels x samples`.
filename : str
    Output filename of the stems file
rate : int
    Output samplerate. Defaults to 44100 Hz.
bitrate : int
    AAC Bitrate in Bits per second. Defaults to 256 Kbit/s
codec : str
    AAC codec used. Defaults to `None` which automatically selects
    either `libfdk_aac` or `aac` in that order, determined by availability.
ffmpeg_params : list(str)
    List of additional ffmpeg parameters

Notes
-----
Output is written as 16bit/44.1 kHz
https://github.com/faroit/stempeg/blob/ebbaec87ea440fcbb06423d708e7847749e63d38/stempeg/write.py#L38-L128
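A hedged usage sketch, with one stated assumption: although the docstring says stems x channels x samples, each audio[k] is handed to sf.write, and soundfile interprets 2D data as (frames, channels), so the layout below is stems x samples x channels. The sample count is a multiple of 1024 to avoid the padding warning:

import numpy as np
import stempeg

# 4 silent stereo stems of roughly 2 s each (stems x samples x channels)
audio = np.zeros((4, 1024 * 86, 2))
stempeg.write_stems(audio, "out.stem.mp4", rate=44100)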
faroit/stempeg
stempeg/read.py
read_info
def read_info(
    filename
):
    """Extracts FFMPEG info and returns info as JSON

    Returns
    -------
    info : Dict
        JSON info dict
    """
    cmd = [
        'ffprobe',
        filename,
        '-v', 'error',
        '-print_format', 'json',
        '-show_format', '-show_streams',
    ]

    out = sp.check_output(cmd)
    info = json.loads(out.decode('utf-8'))
    return info
python
Extracts FFMPEG info and returns info as JSON

Returns
-------
info : Dict
    JSON info dict
https://github.com/faroit/stempeg/blob/ebbaec87ea440fcbb06423d708e7847749e63d38/stempeg/read.py#L56-L77
faroit/stempeg
stempeg/read.py
read_stems
def read_stems(
    filename,
    out_type=np.float_,
    stem_id=None,
    start=0,
    duration=None,
    info=None
):
    """Read STEMS format into numpy Tensor

    Parameters
    ----------
    filename : str
        Filename of STEMS format. Typically `filename.stem.mp4`.
    out_type : type
        Output type. Defaults to `np.float_` (64bit float).
    stem_id : int
        Stem ID (Stream ID) to read. Defaults to `None`, which reads
        all available stems.
    start : float
        Start position (seek) in seconds, defaults to 0.
    duration : float
        Read `duration` seconds. End position then is `start + duration`.
        Defaults to `None`: read till the end.
    info : object
        Provide an info object, useful if read_stems is called frequently
        on files with the same configuration (#streams, #channels, samplerate).

    Returns
    -------
    stems : array_like
        The tensor (or matrix) of stems. The data shape is formatted as
        :code:`stems x channels x samples`.

    Notes
    -----
    Input is expected to be in 16bit/44.1 kHz
    """
    if info is None:
        FFinfo = Info(filename)
    else:
        FFinfo = info

    if stem_id is not None:
        substreams = stem_id
    else:
        substreams = FFinfo.audio_stream_idx()

    if not isinstance(substreams, list):
        substreams = [substreams]

    stems = []
    tmps = [
        tmp.NamedTemporaryFile(delete=False, suffix='.wav')
        for t in substreams
    ]
    for tmp_id, stem in enumerate(substreams):
        rate = FFinfo.rate(stem)
        channels = FFinfo.channels(stem)
        cmd = [
            'ffmpeg',
            '-y',
            '-vn',
            '-i', filename,
            '-map', '0:' + str(stem),
            '-acodec', 'pcm_s16le',
            '-ar', str(rate),
            '-ac', str(channels),
            '-loglevel', 'error',
            tmps[tmp_id].name
        ]

        if start:
            cmd.insert(3, '-ss')
            cmd.insert(4, str(start))

        if duration is not None:
            cmd.insert(-1, '-t')
            cmd.insert(-1, str(duration))

        sp.call(cmd)

        # read wav files
        audio, rate = sf.read(tmps[tmp_id].name)
        tmps[tmp_id].close()
        os.remove(tmps[tmp_id].name)
        stems.append(audio)

    # check if all stems have the same duration
    stem_durations = np.array([t.shape[0] for t in stems])
    if not (stem_durations == stem_durations[0]).all():
        warnings.warn("Warning.......Stems differ in length and were shortened")
        min_length = np.min(stem_durations)
        stems = [t[:min_length, :] for t in stems]

    stems = np.array(stems)
    stems = np.squeeze(stems).astype(out_type)
    return stems, rate
python
Read STEMS format into numpy Tensor

Parameters
----------
filename : str
    Filename of STEMS format. Typically `filename.stem.mp4`.
out_type : type
    Output type. Defaults to `np.float_` (64bit float).
stem_id : int
    Stem ID (Stream ID) to read. Defaults to `None`, which reads
    all available stems.
start : float
    Start position (seek) in seconds, defaults to 0.
duration : float
    Read `duration` seconds. End position then is `start + duration`.
    Defaults to `None`: read till the end.
info : object
    Provide an info object, useful if read_stems is called frequently
    on files with the same configuration (#streams, #channels, samplerate).

Returns
-------
stems : array_like
    The tensor (or matrix) of stems. The data shape is formatted as
    :code:`stems x channels x samples`.

Notes
-----
Input is expected to be in 16bit/44.1 kHz
https://github.com/faroit/stempeg/blob/ebbaec87ea440fcbb06423d708e7847749e63d38/stempeg/read.py#L80-L176
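A hedged round-trip sketch for the file written above; stream ids follow the file's own stream order, and ffmpeg/ffprobe are assumed on PATH:

import stempeg

S, rate = stempeg.read_stems("out.stem.mp4")
print(S.shape, rate)                          # e.g. (4, 88064, 2) 44100
# one stem only, one second starting at 0.5 s:
S0, _ = stempeg.read_stems("out.stem.mp4", stem_id=0, start=0.5, duration=1.0)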
titusjan/argos
argos/repo/rtiplugins/pandasio.py
PandasIndexRti.nDims
def nDims(self):
    """ The number of dimensions of the index. Will always be 1.
    """
    result = self._index.ndim
    assert result == 1, "Expected index to be 1D, got: {}D".format(result)
    return result
python
The number of dimensions of the index. Will always be 1.
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/rtiplugins/pandasio.py#L80-L85
titusjan/argos
argos/repo/rtiplugins/pandasio.py
AbstractPandasNDFrameRti.elementTypeName
def elementTypeName(self):
    """ String representation of the element type.
    """
    if self._ndFrame is None:
        return super(AbstractPandasNDFrameRti, self).elementTypeName
    else:
        try:
            return str(self._ndFrame.dtype)  # Series
        except AttributeError:
            return '<structured>'
python
String representation of the element type.
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/rtiplugins/pandasio.py#L196-L205
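The two branches above can be observed on plain pandas objects; a quick runnable illustration:

import pandas as pd

s = pd.Series([1, 2, 3])
print(str(s.dtype))                # 'int64' -- the Series branch
try:
    pd.DataFrame({'a': [1]}).dtype
except AttributeError:
    print('<structured>')          # the structured (DataFrame) fallback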
titusjan/argos
argos/repo/rtiplugins/pandasio.py
AbstractPandasNDFrameRti._createIndexRti
def _createIndexRti(self, index, nodeName):
    """ Auxiliary method that creates a PandasIndexRti.
    """
    return PandasIndexRti(index=index, nodeName=nodeName,
                          fileName=self.fileName, iconColor=self._iconColor)
python
Auxiliary method that creates a PandasIndexRti.
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/rtiplugins/pandasio.py#L208-L212
titusjan/argos
argos/repo/rtiplugins/pandasio.py
PandasSeriesRti._fetchAllChildren
def _fetchAllChildren(self):
    """ Fetches the index if the showIndex member is True
        Descendants can override this function to add the subdivisions.
    """
    assert self.isSliceable, "No underlying pandas object: self._ndFrame is None"
    childItems = []

    if self._standAlone:
        childItems.append(self._createIndexRti(self._ndFrame.index, 'index'))

    return childItems
python
Fetches the index if the showIndex member is True
Descendants can override this function to add the subdivisions.
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/rtiplugins/pandasio.py#L234-L243