def generateVariantAnnotation(self, variant):
    """
    Generate a random variant annotation based on a given variant.
    This generator should be seeded with a value that is unique to the
    variant so that the same annotation will always be produced
    regardless of the order it is generated in.
    """
    # To make this reproducible, make a seed based on this
    # specific variant.
    seed = self._randomSeed + variant.start + variant.end
    randomNumberGenerator = random.Random()
    randomNumberGenerator.seed(seed)
    ann = protocol.VariantAnnotation()
    ann.variant_annotation_set_id = str(self.getCompoundId())
    ann.variant_id = variant.id
    ann.created = datetime.datetime.now().isoformat() + "Z"
    # make a transcript effect for each alternate base element
    # multiplied by a random integer (1,5)
    for base in variant.alternate_bases:
        ann.transcript_effects.add().CopyFrom(
            self.generateTranscriptEffect(
                variant, ann, base, randomNumberGenerator))
    ann.id = self.getVariantAnnotationId(variant, ann)
    return ann
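# Illustrative sketch (not from the original codebase): the
# reproducibility of generateVariantAnnotation comes from seeding a
# private random.Random instance with the variant's coordinates;
# makeAnnotation and baseSeed below are illustrative names only.
import random

def makeAnnotation(start, end, baseSeed=42):
    rng = random.Random()
    # Seed derived from the variant's coordinates, so the output does
    # not depend on the order in which variants are processed.
    rng.seed(baseSeed + start + end)
    return [rng.randint(1, 5) for _ in range(3)]

# The same variant always yields the same annotation.
assert makeAnnotation(100, 150) == makeAnnotation(100, 150)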
def populateFromRow(self, annotationSetRecord):
    """
    Populates this VariantAnnotationSet from the specified DB row.
    """
    self._annotationType = annotationSetRecord.annotationtype
    self._analysis = protocol.fromJson(
        annotationSetRecord.analysis, protocol.Analysis)
    self._creationTime = annotationSetRecord.created
    self._updatedTime = annotationSetRecord.updated
    self.setAttributesJson(annotationSetRecord.attributes)
def _getAnnotationAnalysis(self, varFile):
    """
    Assembles metadata within the VCF header into a GA4GH Analysis object.

    :return: protocol.Analysis
    """
    header = varFile.header
    analysis = protocol.Analysis()
    formats = header.formats.items()
    infos = header.info.items()
    filters = header.filters.items()
    for prefix, content in [("FORMAT", formats), ("INFO", infos),
                            ("FILTER", filters)]:
        for contentKey, value in content:
            key = "{0}.{1}".format(prefix, value.name)
            if key not in analysis.attributes.attr:
                analysis.attributes.attr[key].Clear()
            if value.description is not None:
                analysis.attributes.attr[
                    key].values.add().string_value = value.description
    analysis.created = self._creationTime
    analysis.updated = self._updatedTime
    for r in header.records:
        # Don't add a key to info if there's nothing in the value
        if r.value is not None:
            if r.key not in analysis.attributes.attr:
                analysis.attributes.attr[r.key].Clear()
            analysis.attributes.attr[r.key] \
                .values.add().string_value = str(r.value)
        if r.key == "created" or r.key == "fileDate":
            # TODO handle more date formats
            try:
                if '-' in r.value:
                    fmtStr = "%Y-%m-%d"
                else:
                    fmtStr = "%Y%m%d"
                analysis.created = datetime.datetime.strptime(
                    r.value, fmtStr).isoformat() + "Z"
            except ValueError:
                # is there a logger we should tell?
                # print("INFO: Could not parse variant annotation time")
                pass  # analysis.create_date_time remains datetime.now()
        if r.key == "software":
            analysis.software.append(r.value)
        if r.key == "name":
            analysis.name = r.value
        if r.key == "description":
            analysis.description = r.value
    analysis.id = str(datamodel.VariantAnnotationSetAnalysisCompoundId(
        self._compoundId, "analysis"))
    return analysis
def getVariantAnnotations(self, referenceName, startPosition, endPosition):
    """
    Generator for iterating through variant annotations in this
    variant annotation set.

    :param referenceName:
    :param startPosition:
    :param endPosition:
    :return: generator of protocol.VariantAnnotation
    """
    variantIter = self._variantSet.getPysamVariants(
        referenceName, startPosition, endPosition)
    for record in variantIter:
        yield self.convertVariantAnnotation(record)
def convertLocation(self, pos):
    """
    Accepts a position string (start/length) and returns a GA4GH
    AlleleLocation with populated fields.

    :param pos:
    :return: protocol.AlleleLocation
    """
    if isUnspecified(pos):
        return None
    coordLen = pos.split('/')
    if len(coordLen) > 1:
        allLoc = self._createGaAlleleLocation()
        allLoc.start = int(coordLen[0]) - 1
        return allLoc
    return None
def convertLocationHgvsC(self, hgvsc):
    """
    Accepts an annotation in HGVS notation and returns an
    AlleleLocation with populated fields.

    :param hgvsc:
    :return: protocol.AlleleLocation
    """
    if isUnspecified(hgvsc):
        return None
    # Raw string avoids invalid escape sequences in the pattern.
    match = re.match(r".*c.(\d+)(\D+)>(\D+)", hgvsc)
    if match:
        pos = int(match.group(1))
        if pos > 0:
            allLoc = self._createGaAlleleLocation()
            allLoc.start = pos - 1
            allLoc.reference_sequence = match.group(2)
            allLoc.alternate_sequence = match.group(3)
            return allLoc
    return None
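# Illustrative sketch (not from the original codebase): the pattern
# used by convertLocationHgvsC, applied to a typical HGVS c. string.
# Note the '.' before the position is a regex wildcard that happens to
# match the literal '.' of "c." in practice.
import re

HGVS_C_RE = re.compile(r".*c.(\d+)(\D+)>(\D+)")

m = HGVS_C_RE.match("ENST00000366667:c.803C>T")
if m is not None:
    start = int(m.group(1)) - 1           # 0-based start: 802
    ref, alt = m.group(2), m.group(3)     # 'C', 'T'
    print("start=%d ref=%s alt=%s" % (start, ref, alt))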
def convertLocationHgvsP(self, hgvsp):
    """
    Accepts an annotation in HGVS notation and returns an
    AlleleLocation with populated fields.

    :param hgvsp:
    :return: protocol.AlleleLocation
    """
    if isUnspecified(hgvsp):
        return None
    match = re.match(r".*p.(\D+)(\d+)(\D+)", hgvsp, flags=re.UNICODE)
    if match is not None:
        allLoc = self._createGaAlleleLocation()
        allLoc.reference_sequence = match.group(1)
        allLoc.start = int(match.group(2)) - 1
        allLoc.alternate_sequence = match.group(3)
        return allLoc
    return None
def addLocations(self, effect, protPos, cdnaPos):
    """
    Adds locations to a GA4GH transcript effect object by parsing HGVS
    annotation fields in concert with the supplied position values.

    :param effect: protocol.TranscriptEffect
    :param protPos: String representing protein position from VCF
    :param cdnaPos: String representing coding DNA location
    :return: effect protocol.TranscriptEffect
    """
    self.addCDSLocation(effect, cdnaPos)
    self.addCDNALocation(effect, cdnaPos)
    self.addProteinLocation(effect, protPos)
    return effect
def convertTranscriptEffect(self, annStr, hgvsG):
    """
    Takes the ANN string of a SnpEff generated VCF, splits it
    and returns a populated GA4GH transcript effect object.

    :param annStr: String
    :param hgvsG: String
    :return: effect protocol.TranscriptEffect()
    """
    effect = self._createGaTranscriptEffect()
    effect.hgvs_annotation.CopyFrom(protocol.HGVSAnnotation())
    if self._annotationType == ANNOTATIONS_SNPEFF:
        annDict = dict(zip(self.SNPEFF_FIELDS, annStr.split("|")))
    elif self._annotationType == ANNOTATIONS_VEP_V82:
        annDict = dict(zip(self.VEP_FIELDS, annStr.split("|")))
    else:
        annDict = dict(zip(self.CSQ_FIELDS, annStr.split("|")))
    annDict["hgvs_annotation.genomic"] = hgvsG if hgvsG else u''
    for key, val in annDict.items():
        try:
            protocol.deepSetAttr(effect, key, val)
        except AttributeError:
            if val and key not in self.EXCLUDED_FIELDS:
                protocol.setAttribute(
                    effect.attributes.attr[key].values, val)
    effect.effects.extend(self.convertSeqOntology(annDict.get('effects')))
    self.addLocations(
        effect, annDict.get('protPos'), annDict.get('cdnaPos'))
    effect.id = self.getTranscriptEffectId(effect)
    return effect
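# Illustrative sketch (not from the original codebase): how a pipe-
# delimited ANN entry zips against a field-name list. The field names
# below are a hypothetical subset; the real SNPEFF_FIELDS, VEP_FIELDS
# and CSQ_FIELDS lists are class constants not shown here.
fields = ["alt", "effects", "impact", "geneName"]
annStr = "T|missense_variant&splice_region_variant|MODERATE|BRCA1"
annDict = dict(zip(fields, annStr.split("|")))
# The '&'-joined effects string is what convertSeqOntology later splits.
print(annDict["effects"])   # missense_variant&splice_region_variant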
def convertSeqOntology(self, seqOntStr):
    """
    Splits a string of sequence ontology effects and creates an
    ontology term record for each, returning the terms as a list.

    :param seqOntStr:
    :return: [protocol.OntologyTerm]
    """
    return [
        self._ontology.getGaTermByName(soName)
        for soName in seqOntStr.split('&')]
def convertVariantAnnotation(self, record):
    """
    Converts the specified pysam variant record into a GA4GH variant
    annotation object.
    """
    variant = self._variantSet.convertVariant(record, [])
    annotation = self._createGaVariantAnnotation()
    annotation.variant_id = variant.id
    gDots = record.info.get(b'HGVS.g')
    # Convert annotations from the INFO field into TranscriptEffects.
    transcriptEffects = []
    annotations = record.info.get(b'ANN') or record.info.get(b'CSQ')
    for i, ann in enumerate(annotations):
        hgvsG = gDots[i % len(variant.alternate_bases)] if gDots else None
        transcriptEffects.append(self.convertTranscriptEffect(ann, hgvsG))
    annotation.transcript_effects.extend(transcriptEffects)
    annotation.id = self.getVariantAnnotationId(variant, annotation)
    return variant, annotation
def _attributeStr(self, name):
    """
    Return name=value for a single attribute.
    """
    return "{}={}".format(
        _encodeAttr(name),
        ",".join([_encodeAttr(v) for v in self.attributes[name]]))
def _attributeStrs(self):
    """
    Return a semicolon-separated string of name=value attribute pairs,
    including URL-style quoting.
    """
    return ";".join([self._attributeStr(name)
                     for name in self.attributes.iterkeys()])
def featureName(self):
    """
    ID attribute from GFF3, or None if the record doesn't have it.
    Called "Name" rather than "Id" within GA4GH, as there is no
    guarantee of either uniqueness or existence.
    """
    featId = self.attributes.get("ID")
    if featId is not None:
        featId = featId[0]
    return featId
def _linkFeature(self, feature):
    """
    Link a feature with its parents.
    """
    parentNames = feature.attributes.get("Parent")
    if parentNames is None:
        self.roots.add(feature)
    else:
        for parentName in parentNames:
            self._linkToParent(feature, parentName)
def _linkToParent(self, feature, parentName):
    """
    Link a feature to the named parent, and the parent to this child.
    """
    parentParts = self.byFeatureName.get(parentName)
    if parentParts is None:
        raise GFF3Exception(
            "Parent feature does not exist: {}".format(parentName),
            self.fileName)
    # The parent may be disjoint (split across multiple records).
    for parentPart in parentParts:
        feature.parents.add(parentPart)
        parentPart.children.add(feature)
def linkChildFeaturesToParents(self):
    """
    Finish loading the set, constructing the tree.
    """
    # Features may be disjoint (split across multiple records).
    for featureParts in self.byFeatureName.itervalues():
        for feature in featureParts:
            self._linkFeature(feature)
def _recSortKey(r):
    """
    Sort order for Features: by genomic coordinate, disambiguated by
    feature type (alphabetically).
    """
    return r.seqname, r.start, -r.end, r.type
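# Illustrative sketch (not from the original codebase): the -end term
# in _recSortKey makes longer (enclosing) features sort before the
# features they contain when starts are equal.
from collections import namedtuple

Rec = namedtuple("Rec", ["seqname", "start", "end", "type"])

def recSortKey(r):
    return r.seqname, r.start, -r.end, r.type

recs = [Rec("chr1", 100, 400, "mRNA"),
        Rec("chr1", 200, 300, "exon"),
        Rec("chr1", 100, 500, "gene")]
print([r.type for r in sorted(recs, key=recSortKey)])
# ['gene', 'mRNA', 'exon']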
def _writeRec(self, fh, rec):
    """
    Writes a single record to a file provided by the filehandle fh.
    """
    fh.write(str(rec) + "\n")
    for child in sorted(rec.children, key=self._recSortKey):
        self._writeRec(fh, child)
def write(self, fh):
    """
    Write set to a GFF3 format file.

    :param file fh: file handle for file to write to
    """
    fh.write(GFF3_HEADER + "\n")
    for root in sorted(self.roots, key=self._recSortKey):
        self._writeRec(fh, root)
def _open(self):
    """
    Open the input file, optionally with decompression.
    """
    if self.fileName.endswith(".gz"):
        return gzip.open(self.fileName)
    elif self.fileName.endswith(".bz2"):
        return bz2.BZ2File(self.fileName)
    else:
        return open(self.fileName)
def _parseAttrVal(self, attrStr):
    """
    Returns a tuple of (attr, values); a list of values is returned
    to handle multi-valued attributes.
    """
    m = self.SPLIT_ATTR_RE.match(attrStr)
    if m is None:
        raise GFF3Exception(
            "can't parse attribute/value: '" + attrStr + "'",
            self.fileName, self.lineNumber)
    name = urllib.unquote(m.group(1))
    val = m.group(2)
    # Split by comma to separate values, then unquote each.
    # Commas within values must be URL encoded.
    return name, [urllib.unquote(v) for v in val.split(',')]
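# Illustrative sketch (not from the original codebase): attribute
# parsing with URL-unquoting. SPLIT_ATTR_RE is not shown in the
# original, so the regex below is an assumed equivalent.
import re
try:
    from urllib import unquote            # Python 2
except ImportError:
    from urllib.parse import unquote      # Python 3

SPLIT_ATTR_RE = re.compile(r"^([^=]+)=(.*)$")

m = SPLIT_ATTR_RE.match("Note=amino%20acid,variant")
name = unquote(m.group(1))
vals = [unquote(v) for v in m.group(2).split(',')]
print("%s %s" % (name, vals))   # Note ['amino acid', 'variant']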
def _parseAttrs(self, attrsStr):
    """
    Parse the attributes and values.
    """
    attributes = dict()
    for attrStr in self.SPLIT_ATTR_COL_RE.split(attrsStr):
        name, vals = self._parseAttrVal(attrStr)
        if name in attributes:
            raise GFF3Exception(
                "duplicated attribute name: {}".format(name),
                self.fileName, self.lineNumber)
        attributes[name] = vals
    return attributes
def _parseRecord(self, gff3Set, line):
    """
    Parse one record.
    """
    row = line.split("\t")
    if len(row) != self.GFF3_NUM_COLS:
        raise GFF3Exception(
            "Wrong number of columns, expected {}, got {}".format(
                self.GFF3_NUM_COLS, len(row)),
            self.fileName, self.lineNumber)
    feature = Feature(
        urllib.unquote(row[0]), urllib.unquote(row[1]),
        urllib.unquote(row[2]), int(row[3]), int(row[4]),
        row[5], row[6], row[7], self._parseAttrs(row[8]))
    gff3Set.add(feature)
def parse(self):
    """
    Run the parse and return the resulting Gff3Set object.
    """
    fh = self._open()
    try:
        gff3Set = Gff3Set(self.fileName)
        for line in fh:
            self.lineNumber += 1
            self._parseLine(gff3Set, line[0:-1])
    finally:
        fh.close()
    gff3Set.linkChildFeaturesToParents()
    return gff3Set
def addDataset(self, dataset):
    """
    Adds the specified dataset to this data repository.
    """
    id_ = dataset.getId()
    self._datasetIdMap[id_] = dataset
    self._datasetNameMap[dataset.getLocalId()] = dataset
    self._datasetIds.append(id_)
def addReferenceSet(self, referenceSet):
    """
    Adds the specified reference set to this data repository.
    """
    id_ = referenceSet.getId()
    self._referenceSetIdMap[id_] = referenceSet
    self._referenceSetNameMap[referenceSet.getLocalId()] = referenceSet
    self._referenceSetIds.append(id_)
def addOntology(self, ontology):
    """
    Add an ontology map to this data repository.
    """
    self._ontologyNameMap[ontology.getName()] = ontology
    self._ontologyIdMap[ontology.getId()] = ontology
    self._ontologyIds.append(ontology.getId())
def getPeer(self, url):
    """
    Selects the first peer in the datarepo with the given URL.
    This is only used during testing.
    """
    peers = filter(lambda x: x.getUrl() == url, self.getPeers())
    if len(peers) == 0:
        raise exceptions.PeerNotFoundException(url)
    return peers[0]
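# Illustrative sketch (not from the original codebase): on Python 3,
# filter() returns an iterator, so the len() call above would fail.
# A list comprehension behaves identically on both major versions.
class Peer(object):
    def __init__(self, url):
        self._url = url

    def getUrl(self):
        return self._url

allPeers = [Peer("http://1kgenomes.ga4gh.org"), Peer("http://example.org")]
matches = [p for p in allPeers if p.getUrl() == "http://example.org"]
assert len(matches) == 1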
def getDataset(self, id_):
    """
    Returns a dataset with the specified ID, or raises a
    DatasetNotFoundException if it does not exist.
    """
    if id_ not in self._datasetIdMap:
        raise exceptions.DatasetNotFoundException(id_)
    return self._datasetIdMap[id_]
def getDatasetByName(self, name):
    """
    Returns the dataset with the specified name.
    """
    if name not in self._datasetNameMap:
        raise exceptions.DatasetNameNotFoundException(name)
    return self._datasetNameMap[name]
def getOntology(self, id_):
    """
    Returns the ontology with the specified ID.
    """
    if id_ not in self._ontologyIdMap:
        raise exceptions.OntologyNotFoundException(id_)
    return self._ontologyIdMap[id_]
def getOntologyByName(self, name):
    """
    Returns an ontology by name.
    """
    if name not in self._ontologyNameMap:
        raise exceptions.OntologyNameNotFoundException(name)
    return self._ontologyNameMap[name]
def getReferenceSet(self, id_):
    """
    Returns the ReferenceSet with the specified ID, or raises a
    ReferenceSetNotFoundException if it does not exist.
    """
    if id_ not in self._referenceSetIdMap:
        raise exceptions.ReferenceSetNotFoundException(id_)
    return self._referenceSetIdMap[id_]
def getReferenceSetByName(self, name):
    """
    Returns the reference set with the specified name.
    """
    if name not in self._referenceSetNameMap:
        raise exceptions.ReferenceSetNameNotFoundException(name)
    return self._referenceSetNameMap[name]
def getReadGroupSet(self, id_):
    """
    Returns the readgroup set with the specified ID.
    """
    compoundId = datamodel.ReadGroupSetCompoundId.parse(id_)
    dataset = self.getDataset(compoundId.dataset_id)
    return dataset.getReadGroupSet(id_)
def getVariantSet(self, id_):
    """
    Returns the variant set with the specified ID.
    """
    compoundId = datamodel.VariantSetCompoundId.parse(id_)
    dataset = self.getDataset(compoundId.dataset_id)
    return dataset.getVariantSet(id_)
def printSummary(self):
    """
    Prints a summary of this data repository to stdout.
    """
    print("Ontologies:")
    for ontology in self.getOntologys():
        print(
            "", ontology.getOntologyPrefix(), ontology.getName(),
            ontology.getDataUrl(), sep="\t")
    print("ReferenceSets:")
    for referenceSet in self.getReferenceSets():
        print(
            "", referenceSet.getLocalId(), referenceSet.getId(),
            referenceSet.getDescription(), referenceSet.getDataUrl(),
            sep="\t")
        for reference in referenceSet.getReferences():
            print(
                "\t", reference.getLocalId(), reference.getId(),
                sep="\t")
    print("Datasets:")
    for dataset in self.getDatasets():
        print(
            "", dataset.getLocalId(), dataset.getId(),
            dataset.getDescription(), sep="\t")
        print("\tReadGroupSets:")
        for readGroupSet in dataset.getReadGroupSets():
            print(
                "\t", readGroupSet.getLocalId(),
                readGroupSet.getReferenceSet().getLocalId(),
                readGroupSet.getId(),
                readGroupSet.getDataUrl(), sep="\t")
            for readGroup in readGroupSet.getReadGroups():
                print(
                    "\t\t", readGroup.getId(), readGroup.getLocalId(),
                    sep="\t")
        print("\tVariantSets:")
        for variantSet in dataset.getVariantSets():
            print(
                "\t", variantSet.getLocalId(),
                variantSet.getReferenceSet().getLocalId(),
                variantSet.getId(), sep="\t")
            if variantSet.getNumVariantAnnotationSets() > 0:
                print("\t\tVariantAnnotationSets:")
                for vas in variantSet.getVariantAnnotationSets():
                    print(
                        "\t\t", vas.getLocalId(),
                        vas.getAnnotationType(),
                        vas.getOntology().getName(), sep="\t")
        print("\tFeatureSets:")
        for featureSet in dataset.getFeatureSets():
            print(
                "\t", featureSet.getLocalId(),
                featureSet.getReferenceSet().getLocalId(),
                featureSet.getOntology().getName(),
                featureSet.getId(), sep="\t")
        print("\tContinuousSets:")
        for continuousSet in dataset.getContinuousSets():
            print(
                "\t", continuousSet.getLocalId(),
                continuousSet.getReferenceSet().getLocalId(),
                continuousSet.getId(), sep="\t")
        print("\tPhenotypeAssociationSets:")
        for phenotypeAssociationSet in \
                dataset.getPhenotypeAssociationSets():
            print(
                "\t", phenotypeAssociationSet.getLocalId(),
                phenotypeAssociationSet.getParentContainer().getId(),
                sep="\t")
            # TODO - please improve this listing
        print("\tRnaQuantificationSets:")
        for rna_quantification_set in dataset.getRnaQuantificationSets():
            print(
                "\t", rna_quantification_set.getLocalId(),
                rna_quantification_set.getId(), sep="\t")
            for quant in rna_quantification_set.getRnaQuantifications():
                print(
                    "\t\t", quant.getLocalId(), quant._description,
                    ",".join(quant._readGroupIds),
                    ",".join(quant._featureSetIds), sep="\t")
def allReadGroups(self):
    """
    Return an iterator over all read groups in the data repo.
    """
    for dataset in self.getDatasets():
        for readGroupSet in dataset.getReadGroupSets():
            for readGroup in readGroupSet.getReadGroups():
                yield readGroup
def allFeatures(self):
    """
    Return an iterator over all features in the data repo.
    """
    for dataset in self.getDatasets():
        for featureSet in dataset.getFeatureSets():
            for feature in featureSet.getFeatures():
                yield feature
def allCallSets(self):
    """
    Return an iterator over all call sets in the data repo.
    """
    for dataset in self.getDatasets():
        for variantSet in dataset.getVariantSets():
            for callSet in variantSet.getCallSets():
                yield callSet
def allVariantAnnotationSets(self):
    """
    Return an iterator over all variant annotation sets in the data repo.
    """
    for dataset in self.getDatasets():
        for variantSet in dataset.getVariantSets():
            for vaSet in variantSet.getVariantAnnotationSets():
                yield vaSet
def allRnaQuantifications(self):
    """
    Return an iterator over all RNA quantifications.
    """
    for dataset in self.getDatasets():
        for rnaQuantificationSet in dataset.getRnaQuantificationSets():
            for rnaQuantification in \
                    rnaQuantificationSet.getRnaQuantifications():
                yield rnaQuantification
def allExpressionLevels(self):
    """
    Return an iterator over all expression levels.
    """
    for dataset in self.getDatasets():
        for rnaQuantificationSet in dataset.getRnaQuantificationSets():
            for rnaQuantification in \
                    rnaQuantificationSet.getRnaQuantifications():
                for expressionLevel in \
                        rnaQuantification.getExpressionLevels():
                    yield expressionLevel
def getPeer(self, url):
    """
    Finds a peer by URL and returns the first peer record with that URL.
    """
    peers = list(models.Peer.select().where(models.Peer.url == url))
    if len(peers) == 0:
        raise exceptions.PeerNotFoundException(url)
    return peers[0]
def getPeers(self, offset=0, limit=1000):
    """
    Get the list of peers using an SQL offset and limit.
    Returns a list of peer datamodel objects.
    """
    select = models.Peer.select().order_by(
        models.Peer.url).limit(limit).offset(offset)
    return [peers.Peer(p.url, record=p) for p in select]
def tableToTsv(self, model):
    """
    Takes a model class and attempts to create a table in TSV format
    that can be imported into a spreadsheet program.
    """
    first = True
    for item in model.select():
        if first:
            header = "".join(
                ["{}\t".format(x) for x in model._meta.fields.keys()])
            print(header)
            first = False
        row = "".join(
            ["{}\t".format(
                getattr(item, key)) for key in model._meta.fields.keys()])
        print(row)
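# Illustrative sketch (not from the original codebase): building TSV
# rows with "\t".join() avoids the trailing tab that the per-column
# format() loop above leaves at the end of every line.
fields = ["id", "url", "attributes"]
rows = [{"id": 1, "url": "http://example.org", "attributes": "{}"}]

print("\t".join(fields))
for row in rows:
    print("\t".join(str(row[f]) for f in fields))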
def clearAnnouncements(self):
    """
    Flushes the announcement table.
    """
    try:
        q = models.Announcement.delete().where(
            models.Announcement.id > 0)
        q.execute()
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def insertAnnouncement(self, announcement):
    """
    Adds an announcement to the registry for later analysis.
    """
    url = announcement.get('url', None)
    try:
        peers.Peer(url)
    except Exception:
        raise exceptions.BadUrlException(url)
    try:
        # TODO get more details about the user agent
        models.Announcement.create(
            url=announcement.get('url'),
            attributes=json.dumps(announcement.get('attributes', {})),
            remote_addr=announcement.get('remote_addr', None),
            user_agent=announcement.get('user_agent', None))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def open(self, mode=MODE_READ):
    """
    Opens this repo in the specified mode.

    TODO: figure out the correct semantics of this and document the
    intended future behaviour as well as the current transitional
    behaviour.
    """
    if mode not in [MODE_READ, MODE_WRITE]:
        error = "Open mode must be '{}' or '{}'".format(
            MODE_READ, MODE_WRITE)
        raise ValueError(error)
    self._openMode = mode
    if mode == MODE_READ:
        self.assertExists()
        # This is part of the transitional behaviour where
        # we load the whole DB into memory to get access to
        # the data model.
        self.load()
def verify(self):
    """
    Verifies that the data in the repository is consistent.
    """
    # TODO this should emit to a log that we can configure so we can
    # have verbosity levels. We should provide a way to configure
    # where we look at various chromosomes and so on. This will be
    # an important debug tool for administrators.
    for ontology in self.getOntologys():
        print(
            "Verifying Ontology", ontology.getName(),
            "@", ontology.getDataUrl())
        # TODO how do we verify this? Check some well-known SO terms?
    for referenceSet in self.getReferenceSets():
        print(
            "Verifying ReferenceSet", referenceSet.getLocalId(),
            "@", referenceSet.getDataUrl())
        for reference in referenceSet.getReferences():
            length = min(reference.getLength(), 1000)
            bases = reference.getBases(0, length)
            assert len(bases) == length
            print(
                "\tReading", length, "bases from",
                reference.getLocalId())
    for dataset in self.getDatasets():
        print("Verifying Dataset", dataset.getLocalId())
        for featureSet in dataset.getFeatureSets():
            for referenceSet in self.getReferenceSets():
                # TODO cycle through references?
                reference = referenceSet.getReferences()[0]
                print(
                    "\tVerifying FeatureSet", featureSet.getLocalId(),
                    "with reference", reference.getLocalId())
                length = min(reference.getLength(), 1000)
                features = featureSet.getFeatures(
                    reference.getLocalId(), 0, length, None, 3)
                for feature in features:
                    print("\t{}".format(feature))
        # for continuousSet in dataset.getContinuousSets():
        #     -- there is no getContinuous
        for readGroupSet in dataset.getReadGroupSets():
            print(
                "\tVerifying ReadGroupSet", readGroupSet.getLocalId(),
                "@", readGroupSet.getDataUrl())
            references = readGroupSet.getReferenceSet().getReferences()
            # TODO should we cycle through the references? Should
            # probably be an option.
            reference = references[0]
            max_alignments = 10
            for readGroup in readGroupSet.getReadGroups():
                alignments = readGroup.getReadAlignments(reference)
                for i, alignment in enumerate(alignments):
                    if i == max_alignments:
                        break
                print(
                    "\t\tRead", i, "alignments from",
                    readGroup.getLocalId())
        for variantSet in dataset.getVariantSets():
            print("\tVerifying VariantSet", variantSet.getLocalId())
            max_variants = 10
            max_annotations = 10
            refMap = variantSet.getReferenceToDataUrlIndexMap()
            for referenceName, (dataUrl, indexFile) in refMap.items():
                variants = variantSet.getVariants(referenceName, 0, 2**31)
                for i, variant in enumerate(variants):
                    if i == max_variants:
                        break
                print(
                    "\t\tRead", i, "variants from reference",
                    referenceName, "@", dataUrl)
            for annotationSet in variantSet.getVariantAnnotationSets():
                print(
                    "\t\tVerifying VariantAnnotationSet",
                    annotationSet.getLocalId())
                for referenceName in refMap.keys():
                    annotations = annotationSet.getVariantAnnotations(
                        referenceName, 0, 2**31)
                    for i, annotation in enumerate(annotations):
                        if i == max_annotations:
                            break
                    print(
                        "\t\t\tRead", i, "annotations from reference",
                        referenceName)
        for phenotypeAssociationSet \
                in dataset.getPhenotypeAssociationSets():
            print("\t\tVerifying PhenotypeAssociationSet")
            print(
                "\t\t\t", phenotypeAssociationSet.getLocalId(),
                phenotypeAssociationSet.getParentContainer().getId(),
                sep="\t")
def insertOntology(self, ontology):
    """
    Inserts the specified ontology into this repository.
    """
    try:
        models.Ontology.create(
            id=ontology.getName(),
            name=ontology.getName(),
            dataurl=ontology.getDataUrl(),
            ontologyprefix=ontology.getOntologyPrefix())
    except Exception:
        raise exceptions.DuplicateNameException(
            ontology.getName())
def removeOntology(self, ontology):
    """
    Removes the specified ontology term map from this repository.
    """
    # Compare against the model's id column, not the builtin id().
    q = models.Ontology.delete().where(
        models.Ontology.id == ontology.getId())
    q.execute()
def insertReference(self, reference):
    """
    Inserts the specified reference into this repository.
    """
    models.Reference.create(
        id=reference.getId(),
        referencesetid=reference.getParentContainer().getId(),
        name=reference.getLocalId(),
        length=reference.getLength(),
        isderived=reference.getIsDerived(),
        species=json.dumps(reference.getSpecies()),
        md5checksum=reference.getMd5Checksum(),
        sourceaccessions=json.dumps(reference.getSourceAccessions()),
        sourceuri=reference.getSourceUri())
def insertReferenceSet(self, referenceSet):
    """
    Inserts the specified referenceSet into this repository.
    """
    try:
        models.Referenceset.create(
            id=referenceSet.getId(),
            name=referenceSet.getLocalId(),
            description=referenceSet.getDescription(),
            assemblyid=referenceSet.getAssemblyId(),
            isderived=referenceSet.getIsDerived(),
            species=json.dumps(referenceSet.getSpecies()),
            md5checksum=referenceSet.getMd5Checksum(),
            sourceaccessions=json.dumps(
                referenceSet.getSourceAccessions()),
            sourceuri=referenceSet.getSourceUri(),
            dataurl=referenceSet.getDataUrl())
        for reference in referenceSet.getReferences():
            self.insertReference(reference)
    except Exception:
        raise exceptions.DuplicateNameException(
            referenceSet.getLocalId())
def insertDataset(self, dataset):
    """
    Inserts the specified dataset into this repository.
    """
    try:
        models.Dataset.create(
            id=dataset.getId(),
            name=dataset.getLocalId(),
            description=dataset.getDescription(),
            attributes=json.dumps(dataset.getAttributes()))
    except Exception:
        raise exceptions.DuplicateNameException(
            dataset.getLocalId())
def removeDataset(self, dataset):
    """
    Removes the specified dataset from this repository. This performs
    a cascading removal of all items within this dataset.
    """
    for datasetRecord in models.Dataset.select().where(
            models.Dataset.id == dataset.getId()):
        datasetRecord.delete_instance(recursive=True)
def removePhenotypeAssociationSet(self, phenotypeAssociationSet):
    """
    Remove a phenotype association set from the repo.
    """
    q = models.Phenotypeassociationset.delete().where(
        models.Phenotypeassociationset.id ==
        phenotypeAssociationSet.getId())
    q.execute()
def removeFeatureSet(self, featureSet):
    """
    Removes the specified featureSet from this repository.
    """
    q = models.Featureset.delete().where(
        models.Featureset.id == featureSet.getId())
    q.execute()
def removeContinuousSet(self, continuousSet):
    """
    Removes the specified continuousSet from this repository.
    """
    q = models.ContinuousSet.delete().where(
        models.ContinuousSet.id == continuousSet.getId())
    q.execute()
def insertReadGroup(self, readGroup):
    """
    Inserts the specified readGroup into the DB.
    """
    statsJson = json.dumps(protocol.toJsonDict(readGroup.getStats()))
    experimentJson = json.dumps(
        protocol.toJsonDict(readGroup.getExperiment()))
    try:
        models.Readgroup.create(
            id=readGroup.getId(),
            readgroupsetid=readGroup.getParentContainer().getId(),
            name=readGroup.getLocalId(),
            predictedinsertedsize=readGroup.getPredictedInsertSize(),
            samplename=readGroup.getSampleName(),
            description=readGroup.getDescription(),
            stats=statsJson,
            experiment=experimentJson,
            biosampleid=readGroup.getBiosampleId(),
            attributes=json.dumps(readGroup.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def removeReadGroupSet(self, readGroupSet):
    """
    Removes the specified readGroupSet from this repository. This
    performs a cascading removal of all items within this
    readGroupSet.
    """
    for readGroupSetRecord in models.Readgroupset.select().where(
            models.Readgroupset.id == readGroupSet.getId()):
        readGroupSetRecord.delete_instance(recursive=True)
def removeVariantSet(self, variantSet):
    """
    Removes the specified variantSet from this repository. This
    performs a cascading removal of all items within this variantSet.
    """
    for variantSetRecord in models.Variantset.select().where(
            models.Variantset.id == variantSet.getId()):
        variantSetRecord.delete_instance(recursive=True)
def removeBiosample(self, biosample):
    """
    Removes the specified biosample from this repository.
    """
    q = models.Biosample.delete().where(
        models.Biosample.id == biosample.getId())
    q.execute()
def removeIndividual(self, individual):
    """
    Removes the specified individual from this repository.
    """
    q = models.Individual.delete().where(
        models.Individual.id == individual.getId())
    q.execute()
def insertReadGroupSet(self, readGroupSet):
    """
    Inserts the specified readGroupSet into this repository.
    """
    programsJson = json.dumps(
        [protocol.toJsonDict(program) for program in
         readGroupSet.getPrograms()])
    statsJson = json.dumps(protocol.toJsonDict(readGroupSet.getStats()))
    try:
        models.Readgroupset.create(
            id=readGroupSet.getId(),
            datasetid=readGroupSet.getParentContainer().getId(),
            referencesetid=readGroupSet.getReferenceSet().getId(),
            name=readGroupSet.getLocalId(),
            programs=programsJson,
            stats=statsJson,
            dataurl=readGroupSet.getDataUrl(),
            indexfile=readGroupSet.getIndexFile(),
            attributes=json.dumps(readGroupSet.getAttributes()))
        for readGroup in readGroupSet.getReadGroups():
            self.insertReadGroup(readGroup)
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def removeReferenceSet(self, referenceSet):
    """
    Removes the specified referenceSet from this repository. This
    performs a cascading removal of all references within this
    referenceSet. However, it does not remove any of the ReadGroupSets
    or items that refer to this ReferenceSet. These must be deleted
    before the referenceSet can be removed.
    """
    try:
        q = models.Reference.delete().where(
            models.Reference.referencesetid == referenceSet.getId())
        q.execute()
        q = models.Referenceset.delete().where(
            models.Referenceset.id == referenceSet.getId())
        q.execute()
    except Exception:
        msg = ("Unable to delete reference set. "
               "There are objects currently in the registry which are "
               "aligned against it. Remove these objects before "
               "removing the reference set.")
        raise exceptions.RepoManagerException(msg)
def insertVariantAnnotationSet(self, variantAnnotationSet):
    """
    Inserts the specified variantAnnotationSet into this repository.
    """
    analysisJson = json.dumps(
        protocol.toJsonDict(variantAnnotationSet.getAnalysis()))
    try:
        models.Variantannotationset.create(
            id=variantAnnotationSet.getId(),
            variantsetid=variantAnnotationSet.getParentContainer().getId(),
            ontologyid=variantAnnotationSet.getOntology().getId(),
            name=variantAnnotationSet.getLocalId(),
            analysis=analysisJson,
            annotationtype=variantAnnotationSet.getAnnotationType(),
            created=variantAnnotationSet.getCreationTime(),
            updated=variantAnnotationSet.getUpdatedTime(),
            attributes=json.dumps(variantAnnotationSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def insertCallSet(self, callSet):
    """
    Inserts the specified callSet into this repository.
    """
    try:
        models.Callset.create(
            id=callSet.getId(),
            name=callSet.getLocalId(),
            variantsetid=callSet.getParentContainer().getId(),
            biosampleid=callSet.getBiosampleId(),
            attributes=json.dumps(callSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def insertVariantSet(self, variantSet):
    """
    Inserts the specified variantSet into this repository.
    """
    # We cheat a little here with the VariantSetMetadata, and encode
    # these within the table as a JSON dump. These should really be
    # stored in their own table.
    metadataJson = json.dumps(
        [protocol.toJsonDict(metadata) for metadata in
         variantSet.getMetadata()])
    urlMapJson = json.dumps(variantSet.getReferenceToDataUrlIndexMap())
    try:
        models.Variantset.create(
            id=variantSet.getId(),
            datasetid=variantSet.getParentContainer().getId(),
            referencesetid=variantSet.getReferenceSet().getId(),
            name=variantSet.getLocalId(),
            created=datetime.datetime.now(),
            updated=datetime.datetime.now(),
            metadata=metadataJson,
            dataurlindexmap=urlMapJson,
            attributes=json.dumps(variantSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
    for callSet in variantSet.getCallSets():
        self.insertCallSet(callSet)
def insertFeatureSet(self, featureSet):
    """
    Inserts the specified featureSet into this repository.
    """
    # TODO add support for info and sourceUri fields.
    try:
        models.Featureset.create(
            id=featureSet.getId(),
            datasetid=featureSet.getParentContainer().getId(),
            referencesetid=featureSet.getReferenceSet().getId(),
            ontologyid=featureSet.getOntology().getId(),
            name=featureSet.getLocalId(),
            dataurl=featureSet.getDataUrl(),
            attributes=json.dumps(featureSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def insertContinuousSet(self, continuousSet):
    """
    Inserts the specified continuousSet into this repository.
    """
    # TODO add support for info and sourceUri fields.
    try:
        models.ContinuousSet.create(
            id=continuousSet.getId(),
            datasetid=continuousSet.getParentContainer().getId(),
            referencesetid=continuousSet.getReferenceSet().getId(),
            name=continuousSet.getLocalId(),
            dataurl=continuousSet.getDataUrl(),
            attributes=json.dumps(continuousSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def insertBiosample(self, biosample):
    """
    Inserts the specified Biosample into this repository.
    """
    try:
        models.Biosample.create(
            id=biosample.getId(),
            datasetid=biosample.getParentContainer().getId(),
            name=biosample.getLocalId(),
            description=biosample.getDescription(),
            disease=json.dumps(biosample.getDisease()),
            created=biosample.getCreated(),
            updated=biosample.getUpdated(),
            individualid=biosample.getIndividualId(),
            attributes=json.dumps(biosample.getAttributes()),
            individualAgeAtCollection=json.dumps(
                biosample.getIndividualAgeAtCollection()))
    except Exception:
        raise exceptions.DuplicateNameException(
            biosample.getLocalId(),
            biosample.getParentContainer().getLocalId())
def insertIndividual(self, individual):
    """
    Inserts the specified individual into this repository.
    """
    try:
        models.Individual.create(
            id=individual.getId(),
            datasetId=individual.getParentContainer().getId(),
            name=individual.getLocalId(),
            description=individual.getDescription(),
            created=individual.getCreated(),
            updated=individual.getUpdated(),
            species=json.dumps(individual.getSpecies()),
            sex=json.dumps(individual.getSex()),
            attributes=json.dumps(individual.getAttributes()))
    except Exception:
        raise exceptions.DuplicateNameException(
            individual.getLocalId(),
            individual.getParentContainer().getLocalId())
def insertPhenotypeAssociationSet(self, phenotypeAssociationSet):
    """
    Inserts the specified phenotype association set into this
    repository.
    """
    datasetId = phenotypeAssociationSet.getParentContainer().getId()
    attributes = json.dumps(phenotypeAssociationSet.getAttributes())
    try:
        models.Phenotypeassociationset.create(
            id=phenotypeAssociationSet.getId(),
            name=phenotypeAssociationSet.getLocalId(),
            datasetid=datasetId,
            dataurl=phenotypeAssociationSet._dataUrl,
            attributes=attributes)
    except Exception:
        raise exceptions.DuplicateNameException(
            phenotypeAssociationSet.getParentContainer().getId())
def insertRnaQuantificationSet(self, rnaQuantificationSet):
    """
    Inserts the specified rnaQuantificationSet into this repository.
    """
    try:
        models.Rnaquantificationset.create(
            id=rnaQuantificationSet.getId(),
            datasetid=rnaQuantificationSet.getParentContainer().getId(),
            referencesetid=rnaQuantificationSet.getReferenceSet().getId(),
            name=rnaQuantificationSet.getLocalId(),
            dataurl=rnaQuantificationSet.getDataUrl(),
            attributes=json.dumps(rnaQuantificationSet.getAttributes()))
    except Exception:
        raise exceptions.DuplicateNameException(
            rnaQuantificationSet.getLocalId(),
            rnaQuantificationSet.getParentContainer().getLocalId())
def removeRnaQuantificationSet(self, rnaQuantificationSet):
    """
    Removes the specified rnaQuantificationSet from this repository.
    This performs a cascading removal of all items within this
    rnaQuantificationSet.
    """
    q = models.Rnaquantificationset.delete().where(
        models.Rnaquantificationset.id == rnaQuantificationSet.getId())
    q.execute()
def insertPeer(self, peer):
    """
    Accepts a peer datamodel object and adds it to the registry.
    """
    try:
        models.Peer.create(
            url=peer.getUrl(),
            attributes=json.dumps(peer.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def removePeer(self, url):
    """
    Remove peers by URL.
    """
    q = models.Peer.delete().where(
        models.Peer.url == url)
    q.execute()
def initialise(self):
    """
    Initialise this data repository, creating any necessary directories
    and file paths.
    """
    self._checkWriteMode()
    self._createSystemTable()
    self._createNetworkTables()
    self._createOntologyTable()
    self._createReferenceSetTable()
    self._createReferenceTable()
    self._createDatasetTable()
    self._createReadGroupSetTable()
    self._createReadGroupTable()
    self._createCallSetTable()
    self._createVariantSetTable()
    self._createVariantAnnotationSetTable()
    self._createFeatureSetTable()
    self._createContinuousSetTable()
    self._createBiosampleTable()
    self._createIndividualTable()
    self._createPhenotypeAssociationSetTable()
    self._createRnaQuantificationSetTable()
def load(self):
    """
    Loads this data repository into memory.
    """
    self._readSystemTable()
    self._readOntologyTable()
    self._readReferenceSetTable()
    self._readReferenceTable()
    self._readDatasetTable()
    self._readReadGroupSetTable()
    self._readReadGroupTable()
    self._readVariantSetTable()
    self._readCallSetTable()
    self._readVariantAnnotationSetTable()
    self._readFeatureSetTable()
    self._readContinuousSetTable()
    self._readBiosampleTable()
    self._readIndividualTable()
    self._readPhenotypeAssociationSetTable()
    self._readRnaQuantificationSetTable()
def populateFromRow(self, featureSetRecord):
    """
    Populates the instance variables of this FeatureSet from the
    specified DB row.
    """
    self._dbFilePath = featureSetRecord.dataurl
    self.setAttributesJson(featureSetRecord.attributes)
    self.populateFromFile(self._dbFilePath)
def populateFromFile(self, dataUrl):
    """
    Populates the instance variables of this FeatureSet from the
    specified data URL.

    Initializes the dataset from the passed list of sources
    [{source, format}]; see rdflib.parse() for details. If a path
    is set, this backend will load itself.
    """
    self._dbFilePath = dataUrl
    # initialize graph
    self._rdfGraph = rdflib.ConjunctiveGraph()
    # save the path
    self._dataUrl = dataUrl
    self._scanDataFiles(self._dataUrl, ['*.ttl'])
    # extract version
    cgdTTL = rdflib.URIRef("http://data.monarchinitiative.org/ttl/cgd.ttl")
    versionInfo = rdflib.URIRef(
        u'http://www.w3.org/2002/07/owl#versionInfo')
    self._version = None
    for _, _, obj in self._rdfGraph.triples((cgdTTL, versionInfo, None)):
        self._version = obj.toPython()
    # set up the location cache
    self._initializeLocationCache()
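# Illustrative sketch (not from the original codebase; requires
# rdflib): the owl:versionInfo extraction above, run against a tiny
# in-memory Turtle document instead of the CGD .ttl files.
import rdflib

data = '''
<http://data.monarchinitiative.org/ttl/cgd.ttl>
    <http://www.w3.org/2002/07/owl#versionInfo> "2016-01-01" .
'''
graph = rdflib.ConjunctiveGraph()
graph.parse(data=data, format="turtle")

cgdTTL = rdflib.URIRef("http://data.monarchinitiative.org/ttl/cgd.ttl")
versionInfo = rdflib.URIRef(u'http://www.w3.org/2002/07/owl#versionInfo')
version = None
for _, _, obj in graph.triples((cgdTTL, versionInfo, None)):
    version = obj.toPython()
print(version)   # 2016-01-01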
def getFeature(self, compoundId):
    """
    Find a feature and return its GA4GH representation, using the
    compoundId as the featureId.
    """
    feature = self._getFeatureById(compoundId.featureId)
    feature.id = str(compoundId)
    return feature
def _getFeatureById(self, featureId):
    """
    Find a feature and return its GA4GH representation, using the
    'native' id as the featureId.
    """
    featureRef = rdflib.URIRef(featureId)
    featureDetails = self._detailTuples([featureRef])
    feature = {}
    for detail in featureDetails:
        feature[detail['predicate']] = []
    for detail in featureDetails:
        feature[detail['predicate']].append(detail['object'])
    pbFeature = protocol.Feature()
    term = protocol.OntologyTerm()
    # The schema for feature only supports one `type`;
    # here we default to the first OBO-defined type.
    for featureType in sorted(feature[TYPE]):
        if "obolibrary" in featureType:
            term.term = self._featureTypeLabel(featureType)
            term.term_id = featureType
            pbFeature.feature_type.MergeFrom(term)
            break
    pbFeature.id = featureId
    # The schema for feature only supports one `name` and one `symbol`;
    # here we default to the shortest label for symbol and the longest
    # for name.
    feature[LABEL].sort(key=len)
    pbFeature.gene_symbol = feature[LABEL][0]
    pbFeature.name = feature[LABEL][-1]
    pbFeature.attributes.MergeFrom(protocol.Attributes())
    for key in feature:
        for val in sorted(feature[key]):
            pbFeature.attributes.attr[key].values.add().string_value = val
    if featureId in self._locationMap:
        location = self._locationMap[featureId]
        pbFeature.reference_name = location["chromosome"]
        pbFeature.start = location["begin"]
        pbFeature.end = location["end"]
    return pbFeature
def _filterSearchFeaturesRequest(self, reference_name, gene_symbol, name,
                                 start, end):
    """
    Formulate a SPARQL query string based on the parameters.
    """
    query = self._baseQuery()
    filters = []
    location = self._findLocation(reference_name, start, end)
    if location:
        filters.append("?feature = <{}>".format(location))
    if gene_symbol:
        filters.append('regex(?feature_label, "{}")'.format(gene_symbol))
    if name:
        filters.append('regex(?feature_label, "{}")'.format(name))
    # apply filters
    filter = "FILTER ({})".format(' && '.join(filters))
    if len(filters) == 0:
        filter = ""
    query = query.replace("#%FILTER%", filter)
    return query
def _findLocation(self, reference_name, start, end):
    """
    Return a location key from the locationMap.
    """
    try:
        # TODO - sequence_annotations does not have build?
        return self._locationMap['hg19'][reference_name][start][end]
    except KeyError:
        return None
def _initializeLocationCache(self):
    """
    CGD uses the FALDO ontology for locations; it's a bit complicated.
    This function sets up an in-memory cache of all locations, which
    can be queried via:
    locationMap[build][chromosome][begin][end] = location["_id"]
    """
    # cache of locations
    self._locationMap = {}
    locationMap = self._locationMap
    triples = self._rdfGraph.triples
    Ref = rdflib.URIRef
    associations = []
    for subj, _, _ in triples((None, RDF.type, Ref(ASSOCIATION))):
        associations.append(subj.toPython())
    locationIds = []
    for association in associations:
        for _, _, obj in triples((Ref(association),
                                  Ref(HAS_SUBJECT), None)):
            locationIds.append(obj.toPython())
    locations = []
    for _id in locationIds:
        location = {}
        location["_id"] = _id
        for subj, predicate, obj in triples((Ref(location["_id"]),
                                             None, None)):
            if not predicate.toPython() in location:
                location[predicate.toPython()] = []
            bisect.insort(location[predicate.toPython()], obj.toPython())
        if FALDO_LOCATION in location:
            locations.append(location)
    for location in locations:
        for _id in location[FALDO_LOCATION]:
            # lookup faldo region, ensure positions are sorted
            faldoLocation = {}
            faldoLocation["_id"] = _id
            for subj, predicate, obj in triples(
                    (Ref(faldoLocation["_id"]), None, None)):
                if not predicate.toPython() in faldoLocation:
                    faldoLocation[predicate.toPython()] = []
                bisect.insort(faldoLocation[predicate.toPython()],
                              obj.toPython())
            faldoBegins = []
            for _id in faldoLocation[FALDO_BEGIN]:
                faldoBegin = {}
                faldoBegin["_id"] = _id
                for subj, predicate, obj in triples(
                        (Ref(faldoBegin["_id"]), None, None)):
                    faldoBegin[predicate.toPython()] = obj.toPython()
                faldoBegins.append(faldoBegin)
            faldoReferences = []
            for _id in faldoLocation[FALDO_BEGIN]:
                faldoReference = {}
                faldoReference["_id"] = faldoBegin[FALDO_REFERENCE]
                for subj, predicate, obj in triples(
                        (Ref(faldoReference["_id"]), None, None)):
                    faldoReference[predicate.toPython()] = obj.toPython()
                faldoReferences.append(faldoReference)
            faldoEnds = []
            for _id in faldoLocation[FALDO_END]:
                faldoEnd = {}
                faldoEnd["_id"] = _id
                for subj, predicate, obj in triples(
                        (Ref(faldoEnd["_id"]), None, None)):
                    faldoEnd[predicate.toPython()] = obj.toPython()
                faldoEnds.append(faldoEnd)
            for idx, faldoReference in enumerate(faldoReferences):
                if MEMBER_OF in faldoReference:
                    build = faldoReference[MEMBER_OF].split('/')[-1]
                    chromosome = faldoReference[LABEL].split(' ')[0]
                    begin = faldoBegins[idx][FALDO_POSITION]
                    end = faldoEnds[idx][FALDO_POSITION]
                    if build not in locationMap:
                        locationMap[build] = {}
                    if chromosome not in locationMap[build]:
                        locationMap[build][chromosome] = {}
                    if begin not in locationMap[build][chromosome]:
                        locationMap[build][chromosome][begin] = {}
                    if end not in locationMap[build][chromosome][begin]:
                        locationMap[build][chromosome][begin][end] = {}
                    locationMap[build][chromosome][begin][end] = \
                        location["_id"]
                    locationMap[location["_id"]] = {
                        "build": build,
                        "chromosome": chromosome,
                        "begin": begin,
                        "end": end,
                    }
def addValue(self, protocolElement):
    """
    Appends the specified protocolElement to the value list for this
    response.
    """
    self._numElements += 1
    self._bufferSize += protocolElement.ByteSize()
    attr = getattr(self._protoObject, self._valueListName)
    obj = attr.add()
    obj.CopyFrom(protocolElement)
def isFull(self):
    """
    Returns True if the response buffer is full, and False otherwise.
    The buffer is full if either (1) the number of items in the value
    list is >= pageSize or (2) the total length of the serialised
    elements in the page is >= maxBufferSize. If page_size or
    max_response_length were not set in the request then they're not
    checked.
    """
    return (
        (self._pageSize > 0 and self._numElements >= self._pageSize) or
        (self._bufferSize >= self._maxBufferSize)
    )
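# Illustrative sketch (not from the original codebase): the two-limit
# policy of isFull(), reduced to a minimal stand-alone buffer.
class ResponseBuffer(object):
    def __init__(self, pageSize, maxBufferSize):
        self._pageSize = pageSize
        self._maxBufferSize = maxBufferSize
        self._numElements = 0
        self._bufferSize = 0

    def addValue(self, nbytes):
        self._numElements += 1
        self._bufferSize += nbytes

    def isFull(self):
        return ((self._pageSize > 0 and
                 self._numElements >= self._pageSize) or
                self._bufferSize >= self._maxBufferSize)

buf = ResponseBuffer(pageSize=2, maxBufferSize=10000)
buf.addValue(300)
assert not buf.isFull()
buf.addValue(300)
assert buf.isFull()   # the page-size limit trips first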
def getSerializedResponse(self):
    """
    Returns a string version of the SearchResponse that has been built
    by this SearchResponseBuilder.
    """
    self._protoObject.next_page_token = pb.string(self._nextPageToken)
    s = protocol.toJson(self._protoObject)
    return s
def populateFromRow(self, ontologyRecord):
    """
    Populates this Ontology using values in the specified DB row.
    """
    self._id = ontologyRecord.id
    self._dataUrl = ontologyRecord.dataurl
    self._readFile()
def getGaTermByName(self, name):
    """
    Returns a GA4GH OntologyTerm object by name.

    :param name: name of the ontology term, ex. "gene".
    :return: GA4GH OntologyTerm object.
    """
    # TODO what is the correct value when we have no mapping??
    termIds = self.getTermIds(name)
    if len(termIds) == 0:
        termId = ""
        # TODO add logging for missed term translation.
    else:
        # TODO what is the correct behaviour here when we have multiple
        # IDs matching a given name?
        termId = termIds[0]
    term = protocol.OntologyTerm()
    term.term = name
    term.term_id = termId
    return term
def _heavyQuery(variantSetId, callSetIds):
    """
    Very heavy query: calls for the specified list of callSetIds on
    chromosome 2 (11 pages, 90 seconds to fetch the entire thing on a
    high-end desktop machine).
    """
    request = protocol.SearchVariantsRequest()
    request.reference_name = '2'
    request.variant_set_id = variantSetId
    for callSetId in callSetIds:
        # Repeated scalar fields are extended with append(), not add().
        request.call_set_ids.append(callSetId)
    request.page_size = 100
    request.end = 100000
    return request
def timeOneSearch(queryString):
    """
    Returns (search result as JSON string, time elapsed during search).
    """
    startTime = time.clock()
    resultString = backend.runSearchVariants(queryString)
    endTime = time.clock()
    elapsedTime = endTime - startTime
    return resultString, elapsedTime
def benchmarkOneQuery(request, repeatLimit=3, pageLimit=3):
    """
    Repeat the query several times; perhaps don't go through *all* the
    pages. Returns the minimum time to run backend.searchVariants() to
    execute the query (as far as pageLimit allows), *not* including
    JSON processing to prepare queries or parse responses.
    """
    times = []
    queryString = protocol.toJson(request)
    for i in range(0, repeatLimit):
        resultString, elapsedTime = timeOneSearch(queryString)
        accruedTime = elapsedTime
        pageCount = 1
        token = extractNextPageToken(resultString)
        # Iterate to go beyond the first page of results.
        while token is not None and pageCount < pageLimit:
            pageRequest = request
            pageRequest.page_token = token
            pageRequestString = protocol.toJson(pageRequest)
            resultString, elapsedTime = timeOneSearch(pageRequestString)
            accruedTime += elapsedTime
            pageCount = pageCount + 1
            token = extractNextPageToken(resultString)
        times.append(accruedTime)
    # TODO: more sophisticated statistics. Sometimes we want min(),
    # sometimes mean = sum() / len(), sometimes other measures,
    # perhaps exclude outliers...
    # If we compute the average we should throw out at least the first
    # run: return sum(times[2:]) / len(times[2:])
    return min(times)
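# Illustrative sketch (not from the original codebase): the
# min-over-repeats policy of benchmarkOneQuery, using time.time()
# since time.clock() was removed in Python 3.8.
import time

def bestOf(fn, repeatLimit=3):
    # The least-disturbed run is the best estimate of intrinsic cost.
    times = []
    for _ in range(repeatLimit):
        start = time.time()
        fn()
        times.append(time.time() - start)
    return min(times)

print(bestOf(lambda: sum(range(1000000))))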
def getExceptionClass(errorCode):
    """
    Converts the specified error code into the corresponding class
    object. Raises a KeyError if the errorCode is not found.
    """
    classMap = {}
    for name, class_ in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(class_) and issubclass(class_,
                                                  BaseServerException):
            classMap[class_.getErrorCode()] = class_
    return classMap[errorCode]
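# Illustrative sketch (not from the original codebase): the
# module-introspection pattern of getExceptionClass, with minimal
# stand-in exception classes (the names here are hypothetical).
import inspect
import sys

class BaseServerException(Exception):
    errorCode = None

    @classmethod
    def getErrorCode(cls):
        return cls.errorCode

class NotFoundException(BaseServerException):
    errorCode = 404

def getExceptionClass(errorCode):
    classMap = {}
    for name, class_ in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(class_) and issubclass(class_,
                                                  BaseServerException):
            classMap[class_.getErrorCode()] = class_
    return classMap[errorCode]

assert getExceptionClass(404) is NotFoundException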
def toProtocolElement(self):
    """
    Converts this exception into the GA4GH protocol type so that it
    can be communicated back to the client.
    """
    error = protocol.GAException()
    error.error_code = self.getErrorCode()
    error.message = self.getMessage()
    return error
def _init_goterm_ref(self, rec_curr, name, lnum):
    """Initialize new reference and perform checks."""
    if rec_curr is None:
        return GOTerm()
    msg = "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name)
    self._die(msg, lnum)
def _init_typedef(self, typedef_curr, name, lnum):
    """Initialize new typedef and perform checks."""
    if typedef_curr is None:
        return TypeDef()
    msg = "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name)
    self._die(msg, lnum)