Dataset columns:
    sentence1: string, lengths 52 to 3.87M (Python function source)
    sentence2: string, lengths 1 to 47.2k (the function's docstring)
    label: string, 1 class (entailment)
def runSearchContinuousSets(self, request):
    """
    Returns a SearchContinuousSetsResponse for the specified
    SearchContinuousSetsRequest object.
    """
    return self.runSearchRequest(
        request, protocol.SearchContinuousSetsRequest,
        protocol.SearchContinuousSetsResponse,
        self.continuousSetsGenerator)
Returns a SearchContinuousSetsResponse for the specified SearchContinuousSetsRequest object.
entailment
def runSearchContinuous(self, request):
    """
    Returns a SearchContinuousResponse for the specified
    SearchContinuousRequest object.

    :param request: JSON string representing searchContinuousRequest
    :return: JSON string representing searchContinuousResponse
    """
    return self.runSearchRequest(
        request, protocol.SearchContinuousRequest,
        protocol.SearchContinuousResponse,
        self.continuousGenerator)
Returns a SearchContinuousResponse for the specified SearchContinuousRequest object. :param request: JSON string representing searchContinuousRequest :return: JSON string representing searchContinuousResponse
entailment
def runSearchRnaQuantificationSets(self, request):
    """
    Returns a SearchRnaQuantificationSetsResponse for the specified
    SearchRnaQuantificationSetsRequest object.
    """
    return self.runSearchRequest(
        request, protocol.SearchRnaQuantificationSetsRequest,
        protocol.SearchRnaQuantificationSetsResponse,
        self.rnaQuantificationSetsGenerator)
Returns a SearchRnaQuantificationSetsResponse for the specified SearchRnaQuantificationSetsRequest object.
entailment
def runSearchRnaQuantifications(self, request):
    """
    Returns a SearchRnaQuantificationsResponse for the specified
    SearchRnaQuantificationsRequest object.
    """
    return self.runSearchRequest(
        request, protocol.SearchRnaQuantificationsRequest,
        protocol.SearchRnaQuantificationsResponse,
        self.rnaQuantificationsGenerator)
Returns a SearchRnaQuantificationsResponse for the specified SearchRnaQuantificationsRequest object.
entailment
def runSearchExpressionLevels(self, request):
    """
    Returns a SearchExpressionLevelsResponse for the specified
    SearchExpressionLevelsRequest object.
    """
    return self.runSearchRequest(
        request, protocol.SearchExpressionLevelsRequest,
        protocol.SearchExpressionLevelsResponse,
        self.expressionLevelsGenerator)
Returns a SearchExpressionLevelsResponse for the specified SearchExpressionLevelsRequest object.
entailment
def populateFromRow(self, dataset):
    """
    Populates the instance variables of this Dataset from the
    specified database row.
    """
    self._description = dataset.description
    self.setAttributesJson(dataset.attributes)
Populates the instance variables of this Dataset from the specified database row.
entailment
def addVariantSet(self, variantSet):
    """
    Adds the specified variantSet to this dataset.
    """
    id_ = variantSet.getId()
    self._variantSetIdMap[id_] = variantSet
    self._variantSetNameMap[variantSet.getLocalId()] = variantSet
    self._variantSetIds.append(id_)
Adds the specified variantSet to this dataset.
entailment
def addBiosample(self, biosample):
    """
    Adds the specified biosample to this dataset.
    """
    id_ = biosample.getId()
    self._biosampleIdMap[id_] = biosample
    self._biosampleIds.append(id_)
    self._biosampleNameMap[biosample.getName()] = biosample
Adds the specified biosample to this dataset.
entailment
def addIndividual(self, individual):
    """
    Adds the specified individual to this dataset.
    """
    id_ = individual.getId()
    self._individualIdMap[id_] = individual
    self._individualIds.append(id_)
    self._individualNameMap[individual.getName()] = individual
Adds the specified individual to this dataset.
entailment
def addFeatureSet(self, featureSet):
    """
    Adds the specified featureSet to this dataset.
    """
    id_ = featureSet.getId()
    self._featureSetIdMap[id_] = featureSet
    self._featureSetIds.append(id_)
    name = featureSet.getLocalId()
    self._featureSetNameMap[name] = featureSet
Adds the specified featureSet to this dataset.
entailment
def addContinuousSet(self, continuousSet):
    """
    Adds the specified continuousSet to this dataset.
    """
    id_ = continuousSet.getId()
    self._continuousSetIdMap[id_] = continuousSet
    self._continuousSetIds.append(id_)
    name = continuousSet.getLocalId()
    self._continuousSetNameMap[name] = continuousSet
Adds the specified continuousSet to this dataset.
entailment
def addReadGroupSet(self, readGroupSet):
    """
    Adds the specified readGroupSet to this dataset.
    """
    id_ = readGroupSet.getId()
    self._readGroupSetIdMap[id_] = readGroupSet
    self._readGroupSetNameMap[readGroupSet.getLocalId()] = readGroupSet
    self._readGroupSetIds.append(id_)
Adds the specified readGroupSet to this dataset.
entailment
def addRnaQuantificationSet(self, rnaQuantSet):
    """
    Adds the specified rnaQuantification set to this dataset.
    """
    id_ = rnaQuantSet.getId()
    self._rnaQuantificationSetIdMap[id_] = rnaQuantSet
    self._rnaQuantificationSetIds.append(id_)
    name = rnaQuantSet.getLocalId()
    self._rnaQuantificationSetNameMap[name] = rnaQuantSet
Adds the specified rnaQuantification set to this dataset.
entailment
def getVariantSet(self, id_):
    """
    Returns the VariantSet with the specified id, or raises a
    VariantSetNotFoundException otherwise.
    """
    if id_ not in self._variantSetIdMap:
        raise exceptions.VariantSetNotFoundException(id_)
    return self._variantSetIdMap[id_]
Returns the VariantSet with the specified id, or raises a VariantSetNotFoundException otherwise.
entailment
def getVariantSetByName(self, name):
    """
    Returns a VariantSet with the specified name, or raises a
    VariantSetNameNotFoundException if it does not exist.
    """
    if name not in self._variantSetNameMap:
        raise exceptions.VariantSetNameNotFoundException(name)
    return self._variantSetNameMap[name]
Returns a VariantSet with the specified name, or raises a VariantSetNameNotFoundException if it does not exist.
entailment
def addPhenotypeAssociationSet(self, phenotypeAssociationSet):
    """
    Adds the specified g2p association set to this dataset.
    """
    id_ = phenotypeAssociationSet.getId()
    self._phenotypeAssociationSetIdMap[id_] = phenotypeAssociationSet
    self._phenotypeAssociationSetNameMap[
        phenotypeAssociationSet.getLocalId()] = phenotypeAssociationSet
    self._phenotypeAssociationSetIds.append(id_)
Adds the specified g2p association set to this dataset.
entailment
def getFeatureSet(self, id_):
    """
    Returns the FeatureSet with the specified id, or raises a
    FeatureSetNotFoundException otherwise.
    """
    if id_ not in self._featureSetIdMap:
        raise exceptions.FeatureSetNotFoundException(id_)
    return self._featureSetIdMap[id_]
Returns the FeatureSet with the specified id, or raises a FeatureSetNotFoundException otherwise.
entailment
def getFeatureSetByName(self, name):
    """
    Returns the FeatureSet with the specified name, or raises a
    FeatureSetNameNotFoundException otherwise.
    """
    if name not in self._featureSetNameMap:
        raise exceptions.FeatureSetNameNotFoundException(name)
    return self._featureSetNameMap[name]
Returns the FeatureSet with the specified name, or raises a FeatureSetNameNotFoundException otherwise.
entailment
def getContinuousSet(self, id_):
    """
    Returns the ContinuousSet with the specified id, or raises a
    ContinuousSetNotFoundException otherwise.
    """
    if id_ not in self._continuousSetIdMap:
        raise exceptions.ContinuousSetNotFoundException(id_)
    return self._continuousSetIdMap[id_]
Returns the ContinuousSet with the specified id, or raises a ContinuousSetNotFoundException otherwise.
entailment
def getContinuousSetByName(self, name):
    """
    Returns the ContinuousSet with the specified name, or raises a
    ContinuousSetNameNotFoundException otherwise.
    """
    if name not in self._continuousSetNameMap:
        raise exceptions.ContinuousSetNameNotFoundException(name)
    return self._continuousSetNameMap[name]
Returns the ContinuousSet with the specified name, or raises a ContinuousSetNameNotFoundException otherwise.
entailment
def getBiosampleByName(self, name):
    """
    Returns a Biosample with the specified name, or raises a
    BiosampleNameNotFoundException if it does not exist.
    """
    if name not in self._biosampleNameMap:
        raise exceptions.BiosampleNameNotFoundException(name)
    return self._biosampleNameMap[name]
Returns a Biosample with the specified name, or raises a BiosampleNameNotFoundException if it does not exist.
entailment
def getBiosample(self, id_):
    """
    Returns the Biosample with the specified id, or raises a
    BiosampleNotFoundException otherwise.
    """
    if id_ not in self._biosampleIdMap:
        raise exceptions.BiosampleNotFoundException(id_)
    return self._biosampleIdMap[id_]
Returns the Biosample with the specified id, or raises a BiosampleNotFoundException otherwise.
entailment
def getIndividualByName(self, name):
    """
    Returns an individual with the specified name, or raises an
    IndividualNameNotFoundException if it does not exist.
    """
    if name not in self._individualNameMap:
        raise exceptions.IndividualNameNotFoundException(name)
    return self._individualNameMap[name]
Returns an individual with the specified name, or raises an IndividualNameNotFoundException if it does not exist.
entailment
def getIndividual(self, id_):
    """
    Returns the Individual with the specified id, or raises an
    IndividualNotFoundException otherwise.
    """
    if id_ not in self._individualIdMap:
        raise exceptions.IndividualNotFoundException(id_)
    return self._individualIdMap[id_]
Returns the Individual with the specified id, or raises an IndividualNotFoundException otherwise.
entailment
def getReadGroupSetByName(self, name):
    """
    Returns a ReadGroupSet with the specified name, or raises a
    ReadGroupSetNameNotFoundException if it does not exist.
    """
    if name not in self._readGroupSetNameMap:
        raise exceptions.ReadGroupSetNameNotFoundException(name)
    return self._readGroupSetNameMap[name]
Returns a ReadGroupSet with the specified name, or raises a ReadGroupSetNameNotFoundException if it does not exist.
entailment
def getReadGroupSet(self, id_):
    """
    Returns the ReadGroupSet with the specified id, or raises a
    ReadGroupNotFoundException otherwise.
    """
    if id_ not in self._readGroupSetIdMap:
        raise exceptions.ReadGroupNotFoundException(id_)
    return self._readGroupSetIdMap[id_]
Returns the ReadGroupSet with the specified id, or raises a ReadGroupNotFoundException otherwise.
entailment
def getRnaQuantificationSetByName(self, name):
    """
    Returns the RnaQuantification set with the specified name, or raises
    an RnaQuantificationSetNameNotFoundException otherwise.
    """
    if name not in self._rnaQuantificationSetNameMap:
        raise exceptions.RnaQuantificationSetNameNotFoundException(name)
    return self._rnaQuantificationSetNameMap[name]
Returns the RnaQuantification set with the specified name, or raises an RnaQuantificationSetNameNotFoundException otherwise.
entailment
def getRnaQuantificationSet(self, id_):
    """
    Returns the RnaQuantification set with the specified id, or raises
    an RnaQuantificationSetNotFoundException otherwise.
    """
    if id_ not in self._rnaQuantificationSetIdMap:
        raise exceptions.RnaQuantificationSetNotFoundException(id_)
    return self._rnaQuantificationSetIdMap[id_]
Returns the RnaQuantification set with the specified id, or raises an RnaQuantificationSetNotFoundException otherwise.
entailment
def parseMalformedBamHeader(headerDict):
    """
    Parses the (probably) intended values out of the specified BAM
    header dictionary, which is incompletely parsed by pysam. This is
    caused by some tools incorrectly using spaces instead of tabs as
    a separator.
    """
    headerString = " ".join(
        "{}:{}".format(k, v) for k, v in headerDict.items() if k != 'CL')
    ret = {}
    for item in headerString.split():
        key, value = item.split(":", 1)
        # build up dict, casting everything back to original type
        ret[key] = type(headerDict.get(key, ""))(value)
    if 'CL' in headerDict:
        ret['CL'] = headerDict['CL']
    return ret
Parses the (probably) intended values out of the specified BAM header dictionary, which is incompletely parsed by pysam. This is caused by some tools incorrectly using spaces instead of tabs as a separator.
entailment
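A quick usage sketch makes the repair concrete. The header entry below is hypothetical, but it shows the kind of space-separated value pysam leaves behind when a tool used spaces instead of tabs:

# Hypothetical pysam header entry: the producing tool separated fields
# with spaces, so pysam folded the PN and VN fields into the ID value.
malformed = {
    'ID': 'bwa PN:bwa VN:0.7.12',
    'CL': 'bwa mem ref.fa reads.fq',
}
fixed = parseMalformedBamHeader(malformed)
# Each space-separated "key:value" token is split back out and cast to
# the type of the corresponding original entry; 'CL' passes through:
# {'ID': 'bwa', 'PN': 'bwa', 'VN': '0.7.12', 'CL': 'bwa mem ref.fa reads.fq'}
print(fixed)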
def _getReadAlignments(
        self, reference, start, end, readGroupSet, readGroup):
    """
    Returns an iterator over the specified reads
    """
    # TODO If reference is None, return against all references,
    # including unmapped reads.
    samFile = self.getFileHandle(self._dataUrl)
    referenceName = reference.getLocalId().encode()
    # TODO deal with errors from htslib
    start, end = self.sanitizeAlignmentFileFetch(start, end)
    readAlignments = samFile.fetch(referenceName, start, end)
    for readAlignment in readAlignments:
        tags = dict(readAlignment.tags)
        if readGroup is None:
            if 'RG' in tags:
                alignmentReadGroupLocalId = tags['RG']
                readGroupCompoundId = datamodel.ReadGroupCompoundId(
                    readGroupSet.getCompoundId(),
                    str(alignmentReadGroupLocalId))
            yield self.convertReadAlignment(
                readAlignment, readGroupSet, str(readGroupCompoundId))
        else:
            if self._filterReads:
                if 'RG' in tags and tags['RG'] == self._localId:
                    yield self.convertReadAlignment(
                        readAlignment, readGroupSet,
                        str(readGroup.getCompoundId()))
            else:
                yield self.convertReadAlignment(
                    readAlignment, readGroupSet,
                    str(readGroup.getCompoundId()))
Returns an iterator over the specified reads
entailment
def convertReadAlignment(self, read, readGroupSet, readGroupId):
    """
    Convert a pysam ReadAlignment to a GA4GH ReadAlignment
    """
    samFile = self.getFileHandle(self._dataUrl)
    # TODO fill out remaining fields
    # TODO refine in tandem with code in converters module
    ret = protocol.ReadAlignment()
    # ret.fragmentId = 'TODO'
    ret.aligned_quality.extend(read.query_qualities)
    ret.aligned_sequence = read.query_sequence
    if SamFlags.isFlagSet(read.flag, SamFlags.READ_UNMAPPED):
        ret.ClearField("alignment")
    else:
        ret.alignment.CopyFrom(protocol.LinearAlignment())
        ret.alignment.mapping_quality = read.mapping_quality
        ret.alignment.position.CopyFrom(protocol.Position())
        ret.alignment.position.reference_name = samFile.getrname(
            read.reference_id)
        ret.alignment.position.position = read.reference_start
        ret.alignment.position.strand = protocol.POS_STRAND
        if SamFlags.isFlagSet(read.flag, SamFlags.READ_REVERSE_STRAND):
            ret.alignment.position.strand = protocol.NEG_STRAND
        for operation, length in read.cigar:
            gaCigarUnit = ret.alignment.cigar.add()
            gaCigarUnit.operation = SamCigar.int2ga(operation)
            gaCigarUnit.operation_length = length
            gaCigarUnit.reference_sequence = ""  # TODO fix this!
    ret.duplicate_fragment = SamFlags.isFlagSet(
        read.flag, SamFlags.DUPLICATE_READ)
    ret.failed_vendor_quality_checks = SamFlags.isFlagSet(
        read.flag, SamFlags.FAILED_QUALITY_CHECK)
    ret.fragment_length = read.template_length
    ret.fragment_name = read.query_name
    for key, value in read.tags:
        # Useful for inspecting the structure of read tags
        # print("{key} {ktype}: {value}, {vtype}".format(
        #     key=key, ktype=type(key), value=value, vtype=type(value)))
        protocol.setAttribute(ret.attributes.attr[key].values, value)
    if SamFlags.isFlagSet(read.flag, SamFlags.MATE_UNMAPPED):
        ret.next_mate_position.Clear()
    else:
        ret.next_mate_position.Clear()
        if read.next_reference_id != -1:
            ret.next_mate_position.reference_name = samFile.getrname(
                read.next_reference_id)
        else:
            ret.next_mate_position.reference_name = ""
        ret.next_mate_position.position = read.next_reference_start
        ret.next_mate_position.strand = protocol.POS_STRAND
        if SamFlags.isFlagSet(read.flag, SamFlags.MATE_REVERSE_STRAND):
            ret.next_mate_position.strand = protocol.NEG_STRAND
    if SamFlags.isFlagSet(read.flag, SamFlags.READ_PAIRED):
        ret.number_reads = 2
    else:
        ret.number_reads = 1
    ret.read_number = -1
    if SamFlags.isFlagSet(read.flag, SamFlags.FIRST_IN_PAIR):
        if SamFlags.isFlagSet(read.flag, SamFlags.SECOND_IN_PAIR):
            ret.read_number = 2
        else:
            ret.read_number = 0
    elif SamFlags.isFlagSet(read.flag, SamFlags.SECOND_IN_PAIR):
        ret.read_number = 1
    ret.improper_placement = not SamFlags.isFlagSet(
        read.flag, SamFlags.READ_PROPER_PAIR)
    ret.read_group_id = readGroupId
    ret.secondary_alignment = SamFlags.isFlagSet(
        read.flag, SamFlags.SECONDARY_ALIGNMENT)
    ret.supplementary_alignment = SamFlags.isFlagSet(
        read.flag, SamFlags.SUPPLEMENTARY_ALIGNMENT)
    ret.id = readGroupSet.getReadAlignmentId(ret)
    return ret
Convert a pysam ReadAlignment to a GA4GH ReadAlignment
entailment
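convertReadAlignment leans on a SamFlags helper that is not part of this excerpt. As a minimal sketch, assuming the constants follow the standard SAM FLAG bit values and that isFlagSet is a plain bitwise mask test (the real module may differ):

class SamFlags(object):
    # Standard SAM FLAG bits, as defined in the SAM specification.
    READ_PAIRED = 0x1
    READ_PROPER_PAIR = 0x2
    READ_UNMAPPED = 0x4
    MATE_UNMAPPED = 0x8
    READ_REVERSE_STRAND = 0x10
    MATE_REVERSE_STRAND = 0x20
    FIRST_IN_PAIR = 0x40
    SECOND_IN_PAIR = 0x80
    SECONDARY_ALIGNMENT = 0x100
    FAILED_QUALITY_CHECK = 0x200
    DUPLICATE_READ = 0x400
    SUPPLEMENTARY_ALIGNMENT = 0x800

    @staticmethod
    def isFlagSet(flag, flagMask):
        # True when every bit in flagMask is set in flag.
        return flag & flagMask == flagMask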
def addReadGroup(self, readGroup):
    """
    Adds the specified ReadGroup to this ReadGroupSet.
    """
    id_ = readGroup.getId()
    self._readGroupIdMap[id_] = readGroup
    self._readGroupIds.append(id_)
Adds the specified ReadGroup to this ReadGroupSet.
entailment
def getReadGroup(self, id_):
    """
    Returns the ReadGroup with the specified id if it exists in this
    ReadGroupSet, or raises a ReadGroupNotFoundException otherwise.
    """
    if id_ not in self._readGroupIdMap:
        raise exceptions.ReadGroupNotFoundException(id_)
    return self._readGroupIdMap[id_]
Returns the ReadGroup with the specified id if it exists in this ReadGroupSet, or raises a ReadGroupNotFoundException otherwise.
entailment
def toProtocolElement(self):
    """
    Returns the GA4GH protocol representation of this ReadGroupSet.
    """
    readGroupSet = protocol.ReadGroupSet()
    readGroupSet.id = self.getId()
    readGroupSet.read_groups.extend(
        [readGroup.toProtocolElement()
         for readGroup in self.getReadGroups()]
    )
    readGroupSet.name = self.getLocalId()
    readGroupSet.dataset_id = self.getParentContainer().getId()
    readGroupSet.stats.CopyFrom(self.getStats())
    self.serializeAttributes(readGroupSet)
    return readGroupSet
Returns the GA4GH protocol representation of this ReadGroupSet.
entailment
def getReadAlignmentId(self, gaAlignment):
    """
    Returns a string ID suitable for use in the specified GA
    ReadAlignment object in this ReadGroupSet.
    """
    compoundId = datamodel.ReadAlignmentCompoundId(
        self.getCompoundId(), gaAlignment.fragment_name)
    return str(compoundId)
Returns a string ID suitable for use in the specified GA ReadAlignment object in this ReadGroupSet.
entailment
def getStats(self):
    """
    Returns the GA4GH protocol representation of this read group set's
    ReadStats.
    """
    stats = protocol.ReadStats()
    stats.aligned_read_count = self._numAlignedReads
    stats.unaligned_read_count = self._numUnalignedReads
    return stats
Returns the GA4GH protocol representation of this read group set's ReadStats.
entailment
def getReadAlignments(self, reference, start=None, end=None):
    """
    Returns an iterator over the specified reads
    """
    return self._getReadAlignments(reference, start, end, self, None)
Returns an iterator over the specified reads
entailment
def populateFromRow(self, readGroupSetRecord):
    """
    Populates the instance variables of this ReadGroupSet from the
    specified database row.
    """
    self._dataUrl = readGroupSetRecord.dataurl
    self._indexFile = readGroupSetRecord.indexfile
    self._programs = []
    for jsonDict in json.loads(readGroupSetRecord.programs):
        program = protocol.fromJson(
            json.dumps(jsonDict), protocol.Program)
        self._programs.append(program)
    stats = protocol.fromJson(
        readGroupSetRecord.stats, protocol.ReadStats)
    self._numAlignedReads = stats.aligned_read_count
    self._numUnalignedReads = stats.unaligned_read_count
Populates the instance variables of this ReadGroupSet from the specified database row.
entailment
def populateFromFile(self, dataUrl, indexFile=None):
    """
    Populates the instance variables of this ReadGroupSet from the
    specified dataUrl and indexFile. If indexFile is not specified,
    the default location (dataUrl + ".bai") is assumed.
    """
    self._dataUrl = dataUrl
    self._indexFile = indexFile
    if indexFile is None:
        self._indexFile = dataUrl + ".bai"
    samFile = self.getFileHandle(self._dataUrl)
    self._setHeaderFields(samFile)
    if 'RG' not in samFile.header or len(samFile.header['RG']) == 0:
        readGroup = HtslibReadGroup(self, self.defaultReadGroupName)
        self.addReadGroup(readGroup)
    else:
        for readGroupHeader in samFile.header['RG']:
            readGroup = HtslibReadGroup(self, readGroupHeader['ID'])
            readGroup.populateFromHeader(readGroupHeader)
            self.addReadGroup(readGroup)
    self._bamHeaderReferenceSetName = None
    for referenceInfo in samFile.header['SQ']:
        if 'AS' not in referenceInfo:
            infoDict = parseMalformedBamHeader(referenceInfo)
        else:
            infoDict = referenceInfo
        name = infoDict.get('AS', references.DEFAULT_REFERENCESET_NAME)
        if self._bamHeaderReferenceSetName is None:
            self._bamHeaderReferenceSetName = name
        elif self._bamHeaderReferenceSetName != name:
            raise exceptions.MultipleReferenceSetsInReadGroupSet(
                self._dataUrl, name, self._bamFileReferenceName)
    self._numAlignedReads = samFile.mapped
    self._numUnalignedReads = samFile.unmapped
Populates the instance variables of this ReadGroupSet from the specified dataUrl and indexFile. If indexFile is not specified, the default location (dataUrl + ".bai") is assumed.
entailment
def toProtocolElement(self):
    """
    Returns the GA4GH protocol representation of this ReadGroup.
    """
    # TODO this is very incomplete, but we don't have the
    # implementation to fill out the rest of the fields currently
    readGroup = protocol.ReadGroup()
    readGroup.id = self.getId()
    readGroup.created = self._creationTime
    readGroup.updated = self._updateTime
    dataset = self.getParentContainer().getParentContainer()
    readGroup.dataset_id = dataset.getId()
    readGroup.name = self.getLocalId()
    readGroup.predicted_insert_size = pb.int(
        self.getPredictedInsertSize())
    referenceSet = self._parentContainer.getReferenceSet()
    readGroup.sample_name = pb.string(self.getSampleName())
    readGroup.biosample_id = pb.string(self.getBiosampleId())
    if referenceSet is not None:
        readGroup.reference_set_id = referenceSet.getId()
    readGroup.stats.CopyFrom(self.getStats())
    readGroup.programs.extend(self.getPrograms())
    readGroup.description = pb.string(self.getDescription())
    readGroup.experiment.CopyFrom(self.getExperiment())
    self.serializeAttributes(readGroup)
    return readGroup
Returns the GA4GH protocol representation of this ReadGroup.
entailment
def getStats(self):
    """
    Returns the GA4GH protocol representation of this read group's
    ReadStats.
    """
    stats = protocol.ReadStats()
    stats.aligned_read_count = self.getNumAlignedReads()
    stats.unaligned_read_count = self.getNumUnalignedReads()
    # TODO base_count requires iterating through all reads
    return stats
Returns the GA4GH protocol representation of this read group's ReadStats.
entailment
def getExperiment(self):
    """
    Returns the GA4GH protocol representation of this read group's
    Experiment.
    """
    experiment = protocol.Experiment()
    experiment.id = self.getExperimentId()
    experiment.instrument_model = pb.string(self.getInstrumentModel())
    experiment.sequencing_center = pb.string(self.getSequencingCenter())
    experiment.description = pb.string(self.getExperimentDescription())
    experiment.library = pb.string(self.getLibrary())
    experiment.platform_unit = pb.string(self.getPlatformUnit())
    experiment.message_create_time = self._iso8601
    experiment.message_update_time = self._iso8601
    experiment.run_time = pb.string(self.getRunTime())
    return experiment
Returns the GA4GH protocol representation of this read group's Experiment.
entailment
def populateFromHeader(self, readGroupHeader):
    """
    Populate the instance variables using the specified SAM header.
    """
    self._sampleName = readGroupHeader.get('SM', None)
    self._description = readGroupHeader.get('DS', None)
    if 'PI' in readGroupHeader:
        self._predictedInsertSize = int(readGroupHeader['PI'])
    self._instrumentModel = readGroupHeader.get('PL', None)
    self._sequencingCenter = readGroupHeader.get('CN', None)
    self._experimentDescription = readGroupHeader.get('DS', None)
    self._library = readGroupHeader.get('LB', None)
    self._platformUnit = readGroupHeader.get('PU', None)
    self._runTime = readGroupHeader.get('DT', None)
Populate the instance variables using the specified SAM header.
entailment
def populateFromRow(self, readGroupRecord):
    """
    Populate the instance variables using the specified DB row.
    """
    self._sampleName = readGroupRecord.samplename
    self._biosampleId = readGroupRecord.biosampleid
    self._description = readGroupRecord.description
    self._predictedInsertSize = readGroupRecord.predictedinsertsize
    stats = protocol.fromJson(readGroupRecord.stats, protocol.ReadStats)
    self._numAlignedReads = stats.aligned_read_count
    self._numUnalignedReads = stats.unaligned_read_count
    experiment = protocol.fromJson(
        readGroupRecord.experiment, protocol.Experiment)
    self._instrumentModel = experiment.instrument_model
    self._sequencingCenter = experiment.sequencing_center
    self._experimentDescription = experiment.description
    self._library = experiment.library
    self._platformUnit = experiment.platform_unit
    self._runTime = experiment.run_time
Populate the instance variables using the specified DB row.
entailment
def getNameFromPath(filePath):
    """
    Returns the filename of the specified path without its extensions.
    This is usually how we derive the default name for a given object.
    """
    if len(filePath) == 0:
        raise ValueError("Cannot have empty path for name")
    fileName = os.path.split(os.path.normpath(filePath))[1]
    # We need to handle things like .fa.gz, so we can't use
    # os.path.splitext
    ret = fileName.split(".")[0]
    assert ret != ""
    return ret
Returns the filename of the specified path without its extensions. This is usually how we derive the default name for a given object.
entailment
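Tracing getNameFromPath with a few inputs shows why it splits on the first "." instead of using os.path.splitext, which would only strip the final extension:

getNameFromPath("/data/refs/GRCh37.fa.gz")  # -> "GRCh37" (both extensions dropped)
getNameFromPath("reads/sample1.bam")        # -> "sample1"
getNameFromPath("/tmp/some/dir/")           # -> "dir" (normpath removes the trailing slash)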
def repoExitError(message):
    """
    Exits the repo manager with error status.
    """
    wrapper = textwrap.TextWrapper(
        break_on_hyphens=False, break_long_words=False)
    formatted = wrapper.fill("{}: error: {}".format(sys.argv[0], message))
    sys.exit(formatted)
Exits the repo manager with error status.
entailment
def _updateRepo(self, func, *args, **kwargs):
    """
    Runs the specified function that updates the repo with the
    specified arguments. This method ensures that all updates are
    transactional, so that if any part of the update fails no changes
    are made to the repo.
    """
    # TODO how do we make this properly transactional?
    self._repo.open(datarepo.MODE_WRITE)
    try:
        func(*args, **kwargs)
        self._repo.commit()
    finally:
        self._repo.close()
Runs the specified function that updates the repo with the specified arguments. This method ensures that all updates are transactional, so that if any part of the update fails no changes are made to the repo.
entailment
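The TODO above marks a real gap: commit() runs only when func succeeds, but a failure that has already written rows is not undone. A hedged sketch of a stricter variant, assuming the repo exposed a rollback() method (hypothetical; the datarepo API shown in this excerpt has no such call):

def _updateRepoTransactional(self, func, *args, **kwargs):
    # Sketch only: rollback() is an assumed method, not part of the
    # repo API visible here.
    self._repo.open(datarepo.MODE_WRITE)
    try:
        func(*args, **kwargs)
        self._repo.commit()
    except Exception:
        self._repo.rollback()  # hypothetical
        raise
    finally:
        self._repo.close()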
def addOntology(self):
    """
    Adds a new Ontology to this repo.
    """
    self._openRepo()
    name = self._args.name
    filePath = self._getFilePath(
        self._args.filePath, self._args.relativePath)
    if name is None:
        name = getNameFromPath(filePath)
    ontology = ontologies.Ontology(name)
    ontology.populateFromFile(filePath)
    self._updateRepo(self._repo.insertOntology, ontology)
Adds a new Ontology to this repo.
entailment
def addDataset(self):
    """
    Adds a new dataset into this repo.
    """
    self._openRepo()
    dataset = datasets.Dataset(self._args.datasetName)
    dataset.setDescription(self._args.description)
    dataset.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(self._repo.insertDataset, dataset)
Adds a new dataset into this repo.
entailment
def addReferenceSet(self):
    """
    Adds a new reference set into this repo.
    """
    self._openRepo()
    name = self._args.name
    filePath = self._getFilePath(
        self._args.filePath, self._args.relativePath)
    if name is None:
        name = getNameFromPath(self._args.filePath)
    referenceSet = references.HtslibReferenceSet(name)
    referenceSet.populateFromFile(filePath)
    referenceSet.setDescription(self._args.description)
    if self._args.species is not None:
        referenceSet.setSpeciesFromJson(self._args.species)
    referenceSet.setIsDerived(self._args.isDerived)
    referenceSet.setAssemblyId(self._args.assemblyId)
    referenceSet.setAttributes(json.loads(self._args.attributes))
    sourceAccessions = []
    if self._args.sourceAccessions is not None:
        sourceAccessions = self._args.sourceAccessions.split(",")
    referenceSet.setSourceAccessions(sourceAccessions)
    referenceSet.setSourceUri(self._args.sourceUri)
    self._updateRepo(self._repo.insertReferenceSet, referenceSet)
Adds a new reference set into this repo.
entailment
def addReadGroupSet(self):
    """
    Adds a new ReadGroupSet into this repo.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    dataUrl = self._args.dataFile
    indexFile = self._args.indexFile
    parsed = urlparse.urlparse(dataUrl)
    # TODO, add https support and others when they have been
    # tested.
    if parsed.scheme in ['http', 'ftp']:
        if indexFile is None:
            raise exceptions.MissingIndexException(dataUrl)
    else:
        if indexFile is None:
            indexFile = dataUrl + ".bai"
        dataUrl = self._getFilePath(
            self._args.dataFile, self._args.relativePath)
        indexFile = self._getFilePath(
            indexFile, self._args.relativePath)
    name = self._args.name
    if self._args.name is None:
        name = getNameFromPath(dataUrl)
    readGroupSet = reads.HtslibReadGroupSet(dataset, name)
    readGroupSet.populateFromFile(dataUrl, indexFile)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        # Try to find a reference set name from the BAM header.
        referenceSetName = readGroupSet.getBamHeaderReferenceSetName()
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    readGroupSet.setReferenceSet(referenceSet)
    readGroupSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(self._repo.insertReadGroupSet, readGroupSet)
Adds a new ReadGroupSet into this repo.
entailment
def addVariantSet(self):
    """
    Adds a new VariantSet into this repo.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    dataUrls = self._args.dataFiles
    name = self._args.name
    if len(dataUrls) == 1:
        if self._args.name is None:
            name = getNameFromPath(dataUrls[0])
        if os.path.isdir(dataUrls[0]):
            # Read in the VCF files from the directory.
            # TODO support uncompressed VCF and BCF files
            vcfDir = dataUrls[0]
            pattern = os.path.join(vcfDir, "*.vcf.gz")
            dataUrls = glob.glob(pattern)
            if len(dataUrls) == 0:
                raise exceptions.RepoManagerException(
                    "Cannot find any VCF files in the directory "
                    "'{}'.".format(vcfDir))
            dataUrls[0] = self._getFilePath(
                dataUrls[0], self._args.relativePath)
    elif self._args.name is None:
        raise exceptions.RepoManagerException(
            "Cannot infer the intended name of the VariantSet when "
            "more than one VCF file is provided. Please provide a "
            "name argument using --name.")
    parsed = urlparse.urlparse(dataUrls[0])
    if parsed.scheme not in ['http', 'ftp']:
        dataUrls = map(lambda url: self._getFilePath(
            url, self._args.relativePath), dataUrls)
    # Now, get the index files for the data files that we've now
    # obtained.
    indexFiles = self._args.indexFiles
    if indexFiles is None:
        # First check if all the paths exist locally, as they must
        # if we are making a default index path.
        for dataUrl in dataUrls:
            if not os.path.exists(dataUrl):
                raise exceptions.MissingIndexException(
                    "Cannot find file '{}'. All variant files must be "
                    "stored locally if the default index location is "
                    "used. If you are trying to create a VariantSet "
                    "based on remote URLs, please download the index "
                    "files to the local file system and provide them "
                    "with the --indexFiles argument".format(dataUrl))
        # We assume that the indexes are made by adding .tbi
        indexSuffix = ".tbi"
        # TODO support BCF input properly here by adding .csi
        indexFiles = [filename + indexSuffix for filename in dataUrls]
    indexFiles = map(lambda url: self._getFilePath(
        url, self._args.relativePath), indexFiles)
    variantSet = variants.HtslibVariantSet(dataset, name)
    variantSet.populateFromFile(dataUrls, indexFiles)
    # Get the reference set that is associated with the variant set.
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        # Try to find a reference set name from the VCF header.
        referenceSetName = variantSet.getVcfHeaderReferenceSetName()
    if referenceSetName is None:
        raise exceptions.RepoManagerException(
            "Cannot infer the ReferenceSet from the VCF header. Please "
            "specify the ReferenceSet to associate with this "
            "VariantSet using the --referenceSetName option")
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    variantSet.setReferenceSet(referenceSet)
    variantSet.setAttributes(json.loads(self._args.attributes))
    # Now check for annotations
    annotationSets = []
    if variantSet.isAnnotated() and self._args.addAnnotationSets:
        ontologyName = self._args.ontologyName
        if ontologyName is None:
            raise exceptions.RepoManagerException(
                "A sequence ontology name must be provided")
        ontology = self._repo.getOntologyByName(ontologyName)
        self._checkSequenceOntology(ontology)
        for annotationSet in variantSet.getVariantAnnotationSets():
            annotationSet.setOntology(ontology)
            annotationSets.append(annotationSet)

    # Add the annotation sets and the variant set as an atomic update
    def updateRepo():
        self._repo.insertVariantSet(variantSet)
        for annotationSet in annotationSets:
            self._repo.insertVariantAnnotationSet(annotationSet)

    self._updateRepo(updateRepo)
Adds a new VariantSet into this repo.
entailment
def addPhenotypeAssociationSet(self):
    """
    Adds a new phenotype association set to this repo.
    """
    self._openRepo()
    name = self._args.name
    if name is None:
        name = getNameFromPath(self._args.dirPath)
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    phenotypeAssociationSet = \
        genotype_phenotype.RdfPhenotypeAssociationSet(
            dataset, name, self._args.dirPath)
    phenotypeAssociationSet.setAttributes(
        json.loads(self._args.attributes))
    self._updateRepo(
        self._repo.insertPhenotypeAssociationSet,
        phenotypeAssociationSet)
Adds a new phenotype association set to this repo.
entailment
def removePhenotypeAssociationSet(self):
    """
    Removes a phenotype association set from the repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    phenotypeAssociationSet = dataset.getPhenotypeAssociationSetByName(
        self._args.name)

    def func():
        self._updateRepo(
            self._repo.removePhenotypeAssociationSet,
            phenotypeAssociationSet)

    self._confirmDelete(
        "PhenotypeAssociationSet",
        phenotypeAssociationSet.getLocalId(),
        func)
Removes a phenotype association set from the repo
entailment
def removeReferenceSet(self):
    """
    Removes a referenceSet from the repo.
    """
    self._openRepo()
    referenceSet = self._repo.getReferenceSetByName(
        self._args.referenceSetName)

    def func():
        self._updateRepo(self._repo.removeReferenceSet, referenceSet)

    self._confirmDelete("ReferenceSet", referenceSet.getLocalId(), func)
Removes a referenceSet from the repo.
entailment
def removeReadGroupSet(self):
    """
    Removes a readGroupSet from the repo.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    readGroupSet = dataset.getReadGroupSetByName(
        self._args.readGroupSetName)

    def func():
        self._updateRepo(self._repo.removeReadGroupSet, readGroupSet)

    self._confirmDelete("ReadGroupSet", readGroupSet.getLocalId(), func)
Removes a readGroupSet from the repo.
entailment
def removeVariantSet(self):
    """
    Removes a variantSet from the repo.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    variantSet = dataset.getVariantSetByName(self._args.variantSetName)

    def func():
        self._updateRepo(self._repo.removeVariantSet, variantSet)

    self._confirmDelete("VariantSet", variantSet.getLocalId(), func)
Removes a variantSet from the repo.
entailment
def removeDataset(self):
    """
    Removes a dataset from the repo.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)

    def func():
        self._updateRepo(self._repo.removeDataset, dataset)

    self._confirmDelete("Dataset", dataset.getLocalId(), func)
Removes a dataset from the repo.
entailment
def addFeatureSet(self):
    """
    Adds a new feature set into this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    filePath = self._getFilePath(
        self._args.filePath, self._args.relativePath)
    name = getNameFromPath(self._args.filePath)
    featureSet = sequence_annotations.Gff3DbFeatureSet(
        dataset, name)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        raise exceptions.RepoManagerException(
            "A reference set name must be provided")
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    featureSet.setReferenceSet(referenceSet)
    ontologyName = self._args.ontologyName
    if ontologyName is None:
        raise exceptions.RepoManagerException(
            "A sequence ontology name must be provided")
    ontology = self._repo.getOntologyByName(ontologyName)
    self._checkSequenceOntology(ontology)
    featureSet.setOntology(ontology)
    featureSet.populateFromFile(filePath)
    featureSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(self._repo.insertFeatureSet, featureSet)
Adds a new feature set into this repo
entailment
def removeFeatureSet(self):
    """
    Removes a feature set from this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    featureSet = dataset.getFeatureSetByName(self._args.featureSetName)

    def func():
        self._updateRepo(self._repo.removeFeatureSet, featureSet)

    self._confirmDelete("FeatureSet", featureSet.getLocalId(), func)
Removes a feature set from this repo
entailment
def addContinuousSet(self):
    """
    Adds a new continuous set into this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    filePath = self._getFilePath(
        self._args.filePath, self._args.relativePath)
    name = getNameFromPath(self._args.filePath)
    continuousSet = continuous.FileContinuousSet(dataset, name)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        raise exceptions.RepoManagerException(
            "A reference set name must be provided")
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    continuousSet.setReferenceSet(referenceSet)
    continuousSet.populateFromFile(filePath)
    self._updateRepo(self._repo.insertContinuousSet, continuousSet)
Adds a new continuous set into this repo
entailment
def removeContinuousSet(self):
    """
    Removes a continuous set from this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    continuousSet = dataset.getContinuousSetByName(
        self._args.continuousSetName)

    def func():
        self._updateRepo(self._repo.removeContinuousSet, continuousSet)

    self._confirmDelete("ContinuousSet", continuousSet.getLocalId(), func)
Removes a continuous set from this repo
entailment
def addBiosample(self):
    """
    Adds a new biosample into this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    biosample = bio_metadata.Biosample(
        dataset, self._args.biosampleName)
    biosample.populateFromJson(self._args.biosample)
    self._updateRepo(self._repo.insertBiosample, biosample)
Adds a new biosample into this repo
entailment
def removeBiosample(self):
    """
    Removes a biosample from this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    biosample = dataset.getBiosampleByName(self._args.biosampleName)

    def func():
        self._updateRepo(self._repo.removeBiosample, biosample)

    self._confirmDelete("Biosample", biosample.getLocalId(), func)
Removes a biosample from this repo
entailment
def addIndividual(self):
    """
    Adds a new individual into this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    individual = bio_metadata.Individual(
        dataset, self._args.individualName)
    individual.populateFromJson(self._args.individual)
    self._updateRepo(self._repo.insertIndividual, individual)
Adds a new individual into this repo
entailment
def removeIndividual(self):
    """
    Removes an individual from this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    individual = dataset.getIndividualByName(self._args.individualName)

    def func():
        self._updateRepo(self._repo.removeIndividual, individual)

    self._confirmDelete("Individual", individual.getLocalId(), func)
Removes an individual from this repo
entailment
def addPeer(self):
    """
    Adds a new peer into this repo
    """
    self._openRepo()
    try:
        peer = peers.Peer(
            self._args.url, json.loads(self._args.attributes))
    except exceptions.BadUrlException:
        raise exceptions.RepoManagerException(
            "The URL for the peer was malformed.")
    except ValueError as e:
        raise exceptions.RepoManagerException(
            "The attributes message was malformed. {}".format(e))
    self._updateRepo(self._repo.insertPeer, peer)
Adds a new peer into this repo
entailment
def removePeer(self):
    """
    Removes a peer by URL from this repo
    """
    self._openRepo()

    def func():
        self._updateRepo(self._repo.removePeer, self._args.url)

    self._confirmDelete("Peer", self._args.url, func)
Removes a peer by URL from this repo
entailment
def removeOntology(self):
    """
    Removes an ontology from the repo.
    """
    self._openRepo()
    ontology = self._repo.getOntologyByName(self._args.ontologyName)

    def func():
        self._updateRepo(self._repo.removeOntology, ontology)

    self._confirmDelete("Ontology", ontology.getName(), func)
Removes an ontology from the repo.
entailment
def addRnaQuantification(self):
    """
    Adds an rnaQuantification into this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    biosampleId = ""
    if self._args.biosampleName:
        biosample = dataset.getBiosampleByName(self._args.biosampleName)
        biosampleId = biosample.getId()
    if self._args.name is None:
        name = getNameFromPath(self._args.quantificationFilePath)
    else:
        name = self._args.name
    # TODO: programs not fully supported by GA4GH yet
    programs = ""
    featureType = "gene"
    if self._args.transcript:
        featureType = "transcript"
    rnaseq2ga.rnaseq2ga(
        self._args.quantificationFilePath, self._args.filePath, name,
        self._args.format, dataset=dataset, featureType=featureType,
        description=self._args.description, programs=programs,
        featureSetNames=self._args.featureSetNames,
        readGroupSetNames=self._args.readGroupSetName,
        biosampleId=biosampleId)
Adds an rnaQuantification into this repo
entailment
def initRnaQuantificationSet(self):
    """
    Initialize an empty RNA quantification set
    """
    store = rnaseq2ga.RnaSqliteStore(self._args.filePath)
    store.createTables()
Initialize an empty RNA quantification set
entailment
def addRnaQuantificationSet(self):
    """
    Adds an rnaQuantificationSet into this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    if self._args.name is None:
        name = getNameFromPath(self._args.filePath)
    else:
        name = self._args.name
    rnaQuantificationSet = rna_quantification.SqliteRnaQuantificationSet(
        dataset, name)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        raise exceptions.RepoManagerException(
            "A reference set name must be provided")
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    rnaQuantificationSet.setReferenceSet(referenceSet)
    rnaQuantificationSet.populateFromFile(self._args.filePath)
    rnaQuantificationSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(
        self._repo.insertRnaQuantificationSet, rnaQuantificationSet)
Adds an rnaQuantificationSet into this repo
entailment
def removeRnaQuantificationSet(self):
    """
    Removes an rnaQuantificationSet from this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    rnaQuantSet = dataset.getRnaQuantificationSetByName(
        self._args.rnaQuantificationSetName)

    def func():
        self._updateRepo(
            self._repo.removeRnaQuantificationSet, rnaQuantSet)

    self._confirmDelete(
        "RnaQuantificationSet", rnaQuantSet.getLocalId(), func)
Removes an rnaQuantificationSet from this repo
entailment
def rnaseq2ga(quantificationFilename, sqlFilename, localName, rnaType,
              dataset=None, featureType="gene", description="",
              programs="", featureSetNames="", readGroupSetNames="",
              biosampleId=""):
    """
    Reads RNA Quantification data in one of several formats and stores
    the data in a sqlite database for use by the GA4GH reference server.

    Supports the following quantification output types:
    Cufflinks, kallisto, RSEM.
    """
    readGroupSetName = ""
    if readGroupSetNames:
        readGroupSetName = readGroupSetNames.strip().split(",")[0]
    featureSetIds = ""
    readGroupIds = ""
    if dataset:
        featureSetIdList = []
        if featureSetNames:
            for annotationName in featureSetNames.split(","):
                featureSet = dataset.getFeatureSetByName(annotationName)
                featureSetIdList.append(featureSet.getId())
            featureSetIds = ",".join(featureSetIdList)
        # TODO: multiple readGroupSets
        if readGroupSetName:
            readGroupSet = dataset.getReadGroupSetByName(readGroupSetName)
            readGroupIds = ",".join(
                [x.getId() for x in readGroupSet.getReadGroups()])
    if rnaType not in SUPPORTED_RNA_INPUT_FORMATS:
        raise exceptions.UnsupportedFormatException(rnaType)
    rnaDB = RnaSqliteStore(sqlFilename)
    if rnaType == "cufflinks":
        writer = CufflinksWriter(rnaDB, featureType, dataset=dataset)
    elif rnaType == "kallisto":
        writer = KallistoWriter(rnaDB, featureType, dataset=dataset)
    elif rnaType == "rsem":
        writer = RsemWriter(rnaDB, featureType, dataset=dataset)
    writeRnaseqTable(rnaDB, [localName], description, featureSetIds,
                     readGroupId=readGroupIds, programs=programs,
                     biosampleId=biosampleId)
    writeExpressionTable(writer, [(localName, quantificationFilename)])
Reads RNA Quantification data in one of several formats and stores the data in a sqlite database for use by the GA4GH reference server. Supports the following quantification output types: Cufflinks, kallisto, RSEM.
entailment
def addRNAQuantification(self, datafields):
    """
    Adds an RNAQuantification to the db. Datafields is a tuple in the
    order: id, feature_set_ids, description, name, read_group_ids,
    programs, biosample_id
    """
    self._rnaValueList.append(datafields)
    if len(self._rnaValueList) >= self._batchSize:
        self.batchaddRNAQuantification()
Adds an RNAQuantification to the db. Datafields is a tuple in the order: id, feature_set_ids, description, name, read_group_ids, programs, biosample_id
entailment
def addExpression(self, datafields):
    """
    Adds an Expression to the db. Datafields is a tuple in the order:
    id, rna_quantification_id, name, expression, is_normalized,
    raw_read_count, score, units, conf_low, conf_hi
    """
    self._expressionValueList.append(datafields)
    if len(self._expressionValueList) >= self._batchSize:
        self.batchAddExpression()
Adds an Expression to the db. Datafields is a tuple in the order: id, rna_quantification_id, name, expression, is_normalized, raw_read_count, score, units, conf_low, conf_hi
entailment
def createIndices(self):
    """
    Index columns that are queried. The expression index can
    take a long time.
    """
    sql = '''CREATE INDEX name_index ON Expression (name)'''
    self._cursor.execute(sql)
    self._dbConn.commit()
    sql = '''CREATE INDEX expression_index ON Expression (expression)'''
    self._cursor.execute(sql)
    self._dbConn.commit()
Index columns that are queried. The expression index can take a long time.
entailment
def writeExpression(self, rnaQuantificationId, quantfilename):
    """
    Reads the quantification results file and adds entries to the
    specified database.
    """
    isNormalized = self._isNormalized
    units = self._units
    with open(quantfilename, "r") as quantFile:
        quantificationReader = csv.reader(quantFile, delimiter=b"\t")
        header = next(quantificationReader)
        expressionLevelColNum = self.setColNum(
            header, self._expressionLevelCol)
        nameColNum = self.setColNum(header, self._nameCol)
        countColNum = self.setColNum(header, self._countCol, -1)
        confColLowNum = self.setColNum(header, self._confColLow, -1)
        confColHiNum = self.setColNum(header, self._confColHi, -1)
        expressionId = 0
        for expression in quantificationReader:
            expressionLevel = expression[expressionLevelColNum]
            name = expression[nameColNum]
            rawCount = 0.0
            if countColNum != -1:
                rawCount = expression[countColNum]
            confidenceLow = 0.0
            confidenceHi = 0.0
            score = 0.0
            if confColLowNum != -1 and confColHiNum != -1:
                confidenceLow = float(expression[confColLowNum])
                confidenceHi = float(expression[confColHiNum])
                score = (confidenceLow + confidenceHi) / 2
            datafields = (expressionId, rnaQuantificationId, name,
                          expressionLevel, isNormalized, rawCount,
                          score, units, confidenceLow, confidenceHi)
            self._db.addExpression(datafields)
            expressionId += 1
    self._db.batchAddExpression()
Reads the quantification results file and adds entries to the specified database.
entailment
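Note that the score column is derived rather than read from the file: it is the midpoint of the confidence interval, and score, confidenceLow, and confidenceHi all stay 0.0 when either confidence column is absent. A worked example with hypothetical values:

confidenceLow, confidenceHi = 8.0, 12.0
score = (confidenceLow + confidenceHi) / 2  # -> 10.0, the interval midpoint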
def _fetchSequence(ac, startIndex=None, endIndex=None):
    """Fetch sequences from NCBI using the eutils interface.

    An interbase interval may be optionally provided with startIndex
    and endIndex. NCBI eutils will return just the requested
    subsequence, which might greatly reduce payload sizes (especially
    with chromosome-scale sequences). The response is returned as a
    list of sequence lines rather than a concatenated sequence.

    >>> len(_fetchSequence('NP_056374.2'))
    1596

    Pass the desired interval rather than using Python's [] slice
    operator.

    >>> _fetchSequence('NP_056374.2',0,10)
    'MESRETLSSS'

    >>> _fetchSequence('NP_056374.2')[0:10]
    'MESRETLSSS'
    """
    urlFmt = (
        "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
        "db=nucleotide&id={ac}&rettype=fasta&retmode=text")
    if startIndex is None or endIndex is None:
        url = urlFmt.format(ac=ac)
    else:
        urlFmt += "&seq_start={start}&seq_stop={stop}"
        url = urlFmt.format(ac=ac, start=startIndex + 1, stop=endIndex)
    resp = requests.get(url)
    resp.raise_for_status()
    seqlines = resp.content.splitlines()[1:]
    print("{ac}[{s},{e}) => {n} lines ({u})".format(
        ac=ac, s=startIndex, e=endIndex, n=len(seqlines), u=url))
    # return response as list of lines, already line wrapped
    return seqlines
Fetch sequences from NCBI using the eutils interface. An interbase interval may be optionally provided with startIndex and endIndex. NCBI eutils will return just the requested subsequence, which might greatly reduce payload sizes (especially with chromosome-scale sequences). The response is returned as a list of sequence lines rather than a concatenated sequence.

>>> len(_fetchSequence('NP_056374.2'))
1596

Pass the desired interval rather than using Python's [] slice operator.

>>> _fetchSequence('NP_056374.2',0,10)
'MESRETLSSS'

>>> _fetchSequence('NP_056374.2')[0:10]
'MESRETLSSS'
entailment
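The coordinate handling in _fetchSequence deserves a note: the function accepts a zero-based, half-open (interbase) interval, while efetch expects 1-based, closed seq_start/seq_stop coordinates, hence the startIndex + 1 conversion:

# Interbase [0, 10) on NP_056374.2 maps to efetch's 1-based closed
# coordinates seq_start=1, seq_stop=10, i.e. the first ten residues.
startIndex, endIndex = 0, 10
seq_start, seq_stop = startIndex + 1, endIndex  # -> 1, 10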
def createBamHeader(self, baseHeader):
    """
    Creates a new bam header based on the specified header from the
    parent BAM file.
    """
    header = dict(baseHeader)
    newSequences = []
    for index, referenceInfo in enumerate(header['SQ']):
        if index < self.numChromosomes:
            referenceName = referenceInfo['SN']
            # The sequence dictionary in the BAM file has to match up
            # with the sequence ids in the data, so we must be sure
            # that these still match up.
            assert referenceName == self.chromosomes[index]
            newReferenceInfo = {
                'AS': self.referenceSetName,
                'SN': referenceName,
                'LN': 0,  # FIXME
                'UR': 'http://example.com',
                'M5': 'dbb6e8ece0b5de29da56601613007c2a',  # FIXME
                'SP': 'Human'
            }
            newSequences.append(newReferenceInfo)
    header['SQ'] = newSequences
    return header
Creates a new bam header based on the specified header from the parent BAM file.
entailment
def createRepo(self):
    """
    Creates the repository for all the data we've just downloaded.
    """
    repo = datarepo.SqlDataRepository(self.repoPath)
    repo.open("w")
    repo.initialise()
    referenceSet = references.HtslibReferenceSet("GRCh37-subset")
    referenceSet.populateFromFile(self.fastaFilePath)
    referenceSet.setDescription("Subset of GRCh37 used for demonstration")
    referenceSet.setSpeciesFromJson(
        '{"id": "9606",' +
        '"term": "Homo sapiens", "source_name": "NCBI"}')
    for reference in referenceSet.getReferences():
        reference.setSpeciesFromJson(
            '{"id": "9606",' +
            '"term": "Homo sapiens", "source_name": "NCBI"}')
        reference.setSourceAccessions(
            self.accessions[reference.getName()] + ".subset")
    repo.insertReferenceSet(referenceSet)
    dataset = datasets.Dataset("1kg-p3-subset")
    dataset.setDescription("Sample data from 1000 Genomes phase 3")
    repo.insertDataset(dataset)
    variantSet = variants.HtslibVariantSet(dataset, "mvncall")
    variantSet.setReferenceSet(referenceSet)
    dataUrls = [vcfFile for vcfFile, _ in self.vcfFilePaths]
    indexFiles = [indexFile for _, indexFile in self.vcfFilePaths]
    variantSet.populateFromFile(dataUrls, indexFiles)
    variantSet.checkConsistency()
    repo.insertVariantSet(variantSet)
    for sample, (bamFile, indexFile) in zip(
            self.samples, self.bamFilePaths):
        readGroupSet = reads.HtslibReadGroupSet(dataset, sample)
        readGroupSet.populateFromFile(bamFile, indexFile)
        readGroupSet.setReferenceSet(referenceSet)
        repo.insertReadGroupSet(readGroupSet)
    repo.commit()
    repo.close()
    self.log("Finished creating the repository; summary:\n")
    repo.open("r")
    repo.printSummary()
Creates the repository for all the data we've just downloaded.
entailment
def _configure_backend(app):
    """A helper function used just to help modularize the code a bit."""
    # Allocate the backend
    # We use URLs to specify the backend. Currently we have file:// URLs
    # (or URLs with no scheme) for the SqlDataRepository, and special
    # empty:// and simulated:// URLs for empty or simulated data sources.
    dataSource = urlparse.urlparse(app.config["DATA_SOURCE"], "file")
    if dataSource.scheme == "simulated":
        # Ignore the query string
        randomSeed = app.config["SIMULATED_BACKEND_RANDOM_SEED"]
        numCalls = app.config["SIMULATED_BACKEND_NUM_CALLS"]
        variantDensity = app.config["SIMULATED_BACKEND_VARIANT_DENSITY"]
        numVariantSets = app.config["SIMULATED_BACKEND_NUM_VARIANT_SETS"]
        numReferenceSets = app.config[
            "SIMULATED_BACKEND_NUM_REFERENCE_SETS"]
        numReferencesPerReferenceSet = app.config[
            "SIMULATED_BACKEND_NUM_REFERENCES_PER_REFERENCE_SET"]
        numAlignmentsPerReadGroup = app.config[
            "SIMULATED_BACKEND_NUM_ALIGNMENTS_PER_READ_GROUP"]
        numReadGroupsPerReadGroupSet = app.config[
            "SIMULATED_BACKEND_NUM_READ_GROUPS_PER_READ_GROUP_SET"]
        numPhenotypeAssociations = app.config[
            "SIMULATED_BACKEND_NUM_PHENOTYPE_ASSOCIATIONS"]
        numPhenotypeAssociationSets = app.config[
            "SIMULATED_BACKEND_NUM_PHENOTYPE_ASSOCIATION_SETS"]
        numRnaQuantSets = app.config[
            "SIMULATED_BACKEND_NUM_RNA_QUANTIFICATION_SETS"]
        numExpressionLevels = app.config[
            "SIMULATED_BACKEND_NUM_EXPRESSION_LEVELS_PER_RNA_QUANT_SET"]
        dataRepository = datarepo.SimulatedDataRepository(
            randomSeed=randomSeed, numCalls=numCalls,
            variantDensity=variantDensity, numVariantSets=numVariantSets,
            numReferenceSets=numReferenceSets,
            numReferencesPerReferenceSet=numReferencesPerReferenceSet,
            numReadGroupsPerReadGroupSet=numReadGroupsPerReadGroupSet,
            numAlignments=numAlignmentsPerReadGroup,
            numPhenotypeAssociations=numPhenotypeAssociations,
            numPhenotypeAssociationSets=numPhenotypeAssociationSets,
            numRnaQuantSets=numRnaQuantSets,
            numExpressionLevels=numExpressionLevels)
    elif dataSource.scheme == "empty":
        dataRepository = datarepo.EmptyDataRepository()
    elif dataSource.scheme == "file":
        path = os.path.join(dataSource.netloc, dataSource.path)
        dataRepository = datarepo.SqlDataRepository(path)
        dataRepository.open(datarepo.MODE_READ)
    else:
        raise exceptions.ConfigurationException(
            "Unsupported data source scheme: " + dataSource.scheme)
    theBackend = backend.Backend(dataRepository)
    theBackend.setRequestValidation(app.config["REQUEST_VALIDATION"])
    theBackend.setDefaultPageSize(app.config["DEFAULT_PAGE_SIZE"])
    theBackend.setMaxResponseLength(app.config["MAX_RESPONSE_LENGTH"])
    return theBackend
A helper function used just to help modularize the code a bit.
entailment
def configure(configFile=None, baseConfig="ProductionConfig",
              port=8000, extraConfig={}):
    """
    TODO Document this critical function! What does it do? What does
    it assume?
    """
    file_handler = StreamHandler()
    file_handler.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)
    configStr = 'ga4gh.server.serverconfig:{0}'.format(baseConfig)
    app.config.from_object(configStr)
    if os.environ.get('GA4GH_CONFIGURATION') is not None:
        app.config.from_envvar('GA4GH_CONFIGURATION')
    if configFile is not None:
        app.config.from_pyfile(configFile)
    app.config.update(extraConfig.items())
    # Setup file handle cache max size
    datamodel.fileHandleCache.setMaxCacheSize(
        app.config["FILE_HANDLE_CACHE_MAX_SIZE"])
    # Setup CORS
    try:
        cors.CORS(app, allow_headers='Content-Type')
    except AssertionError:
        pass
    app.serverStatus = ServerStatus()
    app.backend = _configure_backend(app)
    if app.config.get('SECRET_KEY'):
        app.secret_key = app.config['SECRET_KEY']
    elif app.config.get('OIDC_PROVIDER'):
        raise exceptions.ConfigurationException(
            'OIDC configuration requires a secret key')
    if app.config.get('CACHE_DIRECTORY'):
        app.cache_dir = app.config['CACHE_DIRECTORY']
    else:
        app.cache_dir = '/tmp/ga4gh'
    app.cache = FileSystemCache(
        app.cache_dir, threshold=5000, default_timeout=600, mode=384)
    # Peer service initialization
    network.initialize(
        app.config.get('INITIAL_PEERS'),
        app.backend.getDataRepository(),
        app.logger)
    app.oidcClient = None
    app.myPort = port
    if app.config.get('AUTH0_ENABLED'):
        emails = app.config.get('AUTH0_AUTHORIZED_EMAILS', '').split(',')
        [auth.authorize_email(e, app.cache) for e in emails]
    if "OIDC_PROVIDER" in app.config:
        # The oic client. If we're testing, we don't want to verify
        # SSL certificates
        app.oidcClient = oic.oic.Client(
            verify_ssl=('TESTING' not in app.config))
        try:
            app.oidcClient.provider_config(app.config['OIDC_PROVIDER'])
        except requests.exceptions.ConnectionError:
            configResponse = message.ProviderConfigurationResponse(
                issuer=app.config['OIDC_PROVIDER'],
                authorization_endpoint=app.config['OIDC_AUTHZ_ENDPOINT'],
                token_endpoint=app.config['OIDC_TOKEN_ENDPOINT'],
                revocation_endpoint=app.config['OIDC_TOKEN_REV_ENDPOINT'])
            app.oidcClient.handle_provider_config(
                configResponse, app.config['OIDC_PROVIDER'])
        # The redirect URI comes from the configuration.
        # If we are testing, then we allow the automatic creation of a
        # redirect uri if none is configured
        redirectUri = app.config.get('OIDC_REDIRECT_URI')
        if redirectUri is None and app.config.get('TESTING'):
            redirectUri = 'https://{0}:{1}/oauth2callback'.format(
                socket.gethostname(), app.myPort)
        app.oidcClient.redirect_uris = [redirectUri]
        if redirectUri is None:
            raise exceptions.ConfigurationException(
                'OIDC configuration requires a redirect uri')
        # We only support dynamic registration while testing.
        if ('registration_endpoint' in app.oidcClient.provider_info
                and app.config.get('TESTING')):
            app.oidcClient.register(
                app.oidcClient.provider_info["registration_endpoint"],
                redirect_uris=[redirectUri])
        else:
            response = message.RegistrationResponse(
                client_id=app.config['OIDC_CLIENT_ID'],
                client_secret=app.config['OIDC_CLIENT_SECRET'],
                redirect_uris=[redirectUri],
                verify_ssl=False)
            app.oidcClient.store_registration_info(response)
Configures the Flask app. Settings are loaded from the named configuration class, then from the file named by the GA4GH_CONFIGURATION environment variable, the optional config file, and the extraConfig dict, with later sources overriding earlier ones. The function then sets up the file handle cache, backend, response cache, peer network, and Auth0/OIDC authentication.
entailment
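As a usage sketch, a launcher script might call configure before serving the app; the config file path and the extra setting below are illustrative assumptions, not values from the original source:

# Hypothetical startup sequence; the path and override are illustrative.
# DEFAULT_PAGE_SIZE is one of the config keys read during backend setup.
configure(
    configFile="/etc/ga4gh/config.py",
    baseConfig="ProductionConfig",
    port=8000,
    extraConfig={"DEFAULT_PAGE_SIZE": 100})
app.run(port=8000)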
def getFlaskResponse(responseString, httpStatus=200): """ Returns a Flask response object for the specified data and HTTP status. """ return flask.Response(responseString, status=httpStatus, mimetype=MIMETYPE)
Returns a Flask response object for the specified data and HTTP status.
entailment
def handleHttpPost(request, endpoint): """ Handles the specified HTTP POST request, which maps to the specified protocol handler endpoint and protocol request class. """ if request.mimetype and request.mimetype != MIMETYPE: raise exceptions.UnsupportedMediaTypeException() request = request.get_data() if request == '' or request is None: request = '{}' responseStr = endpoint(request) return getFlaskResponse(responseStr)
Handles the specified HTTP POST request, which maps to the specified protocol handler endpoint and protocol request class.
entailment
def handleException(exception): """ Handles an exception that occurs somewhere in the process of handling a request. """ serverException = exception if not isinstance(exception, exceptions.BaseServerException): with app.test_request_context(): app.log_exception(exception) serverException = exceptions.getServerError(exception) error = serverException.toProtocolElement() # If the exception is being viewed by a web browser, we can render a nicer # view. if flask.request and 'Accept' in flask.request.headers and \ flask.request.headers['Accept'].find('text/html') != -1: message = "<h1>Error {}</h1><pre>{}</pre>".format( serverException.httpStatus, protocol.toJson(error)) if serverException.httpStatus == 401 \ or serverException.httpStatus == 403: message += "Please try <a href=\"/login\">logging in</a>." return message else: responseStr = protocol.toJson(error) return getFlaskResponse(responseStr, serverException.httpStatus)
Handles an exception that occurs somewhere in the process of handling a request.
entailment
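handleException only takes effect if Flask routes unhandled exceptions to it. A plausible wiring using the standard Flask errorhandler decorator; the registration itself is an assumption, not shown in this source:

# Assumed wiring: send every unhandled exception through handleException.
@app.errorhandler(Exception)
def handleAnyException(exception):
    return handleException(exception)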
def startLogin(): """ If we are not logged in, this generates the redirect URL to the OIDC provider and returns the redirect response :return: A redirect response to the OIDC provider """ flask.session["state"] = oic.oauth2.rndstr(SECRET_KEY_LENGTH) flask.session["nonce"] = oic.oauth2.rndstr(SECRET_KEY_LENGTH) args = { "client_id": app.oidcClient.client_id, "response_type": "code", "scope": ["openid", "profile"], "nonce": flask.session["nonce"], "redirect_uri": app.oidcClient.redirect_uris[0], "state": flask.session["state"] } result = app.oidcClient.do_authorization_request( request_args=args, state=flask.session["state"]) return flask.redirect(result.url)
If we are not logged in, this generates the redirect URL to the OIDC provider and returns the redirect response :return: A redirect response to the OIDC provider
entailment
def checkAuthentication(): """ The request will have a parameter 'key' if it came from the command line client, or have a session key of 'key' if it's the browser. If the token is not found, start the login process. If there is no oidcClient, we are running naked and we don't check. If we're being redirected to the oidcCallback we don't check. :returns None if all is ok (and the request handler continues as usual). Otherwise if the key was in the session (therefore we're in a browser) then startLogin() will redirect to the OIDC provider. If the key was in the request arguments, we're using the command line and just raise an exception. """ if app.oidcClient is None: return if flask.request.endpoint == 'oidcCallback': return key = flask.session.get('key') or flask.request.args.get('key') if key is None or not app.cache.get(key): if 'key' in flask.request.args: raise exceptions.NotAuthenticatedException() else: return startLogin()
The request will have a parameter 'key' if it came from the command line client, or have a session key of 'key' if it's the browser. If the token is not found, start the login process. If there is no oidcClient, we are running naked and we don't check. If we're being redirected to the oidcCallback we don't check. :returns None if all is ok (and the request handler continues as usual). Otherwise if the key was in the session (therefore we're in a browser) then startLogin() will redirect to the OIDC provider. If the key was in the request arguments, we're using the command line and just raise an exception.
entailment
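Because checkAuthentication returns None when the request may proceed and a redirect response otherwise, it slots naturally into Flask's before_request hook, which aborts normal dispatch whenever a handler returns a non-None value. A sketch, assuming it is wired that way:

# Assumed wiring: run the authentication check before every request.
# Flask skips normal dispatch if this returns anything but None.
@app.before_request
def beforeRequest():
    return checkAuthentication()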
def handleFlaskGetRequest(id_, flaskRequest, endpoint):
    """
    Handles the specified Flask request for one of the GET URLs.
    Invokes the specified endpoint to generate a response.
    """
    if flaskRequest.method == "GET":
        return handleHttpGet(id_, endpoint)
    else:
        raise exceptions.MethodNotAllowedException()
Handles the specified Flask request for one of the GET URLs. Invokes the specified endpoint to generate a response.
entailment
def handleFlaskPostRequest(flaskRequest, endpoint):
    """
    Handles the specified Flask request for one of the POST URLs.
    Invokes the specified endpoint to generate a response.
    """
    if flaskRequest.method == "POST":
        return handleHttpPost(flaskRequest, endpoint)
    elif flaskRequest.method == "OPTIONS":
        return handleHttpOptions()
    else:
        raise exceptions.MethodNotAllowedException()
Handles the specified Flask request for one of the POST URLs. Invokes the specified endpoint to generate a response.
entailment
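With these two dispatchers, each protocol route reduces to a one-line delegation. The route and backend method below follow the runSearch* naming pattern used in this codebase but are shown as an assumed wiring for illustration:

# Assumed route wiring following the runSearch* pattern.
@app.route('/variantsets/search', methods=['POST', 'OPTIONS'])
def searchVariantSets():
    return handleFlaskPostRequest(
        flask.request, app.backend.runSearchVariantSets)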
def getVariantAnnotationSets(self, datasetId):
    """
    Returns the list of VariantAnnotationSets for the specified dataset.
    """
    # TODO this should be displayed per-variant set, not per dataset.
    variantAnnotationSets = []
    dataset = app.backend.getDataRepository().getDataset(datasetId)
    for variantSet in dataset.getVariantSets():
        variantAnnotationSets.extend(
            variantSet.getVariantAnnotationSets())
    return variantAnnotationSets
Returns the list of VariantAnnotationSets for the specified dataset.
entailment
def auth_decorator(app=None):
    """
    This decorator wraps a view function so that it is protected when Auth0
    is enabled. This means that any request will be expected to have a signed
    token in the authorization header if the `AUTH0_ENABLED` configuration
    setting is True. The authorization header will have the form:
    "authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9....."
    If a request is not properly signed, an attempt is made to provide the
    client with useful error messages. This means that if a request is not
    authorized the underlying view function will not be executed. When
    `AUTH0_ENABLED` is false, this decorator will simply execute the decorated
    view without observing the authorization header.
    :param app:
    :return: Flask view decorator
    """
    def requires_auth(f):
        @functools.wraps(f)
        def decorated(*args, **kwargs):
            # This decorator will only apply with AUTH0_ENABLED set to True.
            if app.config.get('AUTH0_ENABLED', False):
                client_id = app.config.get("AUTH0_CLIENT_ID")
                client_secret = app.config.get("AUTH0_CLIENT_SECRET")
                auth_header = flask.request.headers.get('Authorization', None)
                # Each of these functions will throw a 401 if there is a
                # problem decoding the token, with a helpful error message.
                if auth_header:
                    token, profile = decode_header(
                        auth_header, client_id, client_secret)
                else:
                    raise exceptions.NotAuthorizedException()
                # We store the token in the session so that later
                # stages can use it to connect identity and authorization.
                flask.session['auth0_key'] = token
                # Now we need to make sure that on top of having a good token
                # they are authorized, and if not, provide an error message.
                is_authorized(app.cache, profile['email'])
                is_active(app.cache, token)
            return f(*args, **kwargs)
        return decorated
    return requires_auth
This decorator wraps a view function so that it is protected when Auth0 is enabled. This means that any request will be expected to have a signed token in the authorization header if the `AUTH0_ENABLED` configuration setting is True. The authorization header will have the form: "authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9....." If a request is not properly signed, an attempt is made to provide the client with useful error messages. This means that if a request is not authorized the underlying view function will not be executed. When `AUTH0_ENABLED` is false, this decorator will simply execute the decorated view without observing the authorization header. :param app: :return: Flask view decorator
entailment
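Usage sketch: the factory is bound to the app once and the resulting decorator applied per view; the protected view below is hypothetical:

# Hypothetical protected view using the decorator factory above.
requires_auth = auth_decorator(app)

@app.route('/secure')
@requires_auth
def secureView():
    return getFlaskResponse('{}')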
def decode_header(auth_header, client_id, client_secret): """ A function that threads the header through decoding and returns a tuple of the token and payload if successful. This does not fully authenticate a request. :param auth_header: :param client_id: :param client_secret: :return: (token, profile) """ return _decode_header( _well_formed( _has_token(_has_bearer(_has_header(auth_header)))), client_id, client_secret)
A function that threads the header through decoding and returns a tuple of the token and payload if successful. This does not fully authenticate a request. :param auth_header: :param client_id: :param client_secret: :return: (token, profile)
entailment
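The validator chain (_has_header, _has_bearer, _has_token, _well_formed) is not shown in this document; presumably each stage raises on a malformed header and otherwise passes its input through. A hypothetical sketch of one stage:

# Hypothetical sketch of one stage in the validation chain; the real
# _has_bearer is not shown here and may differ.
def _has_bearer(auth_header):
    if not auth_header.startswith('Bearer '):
        raise exceptions.NotAuthorizedException(
            'Authorization header must use the Bearer scheme.')
    return auth_header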
def logout(cache):
    """
    Logs out the current session by removing it from the cache. This is
    expected to only occur when a session has been authenticated.
    """
    cache.set(flask.session['auth0_key'], None)
    flask.session.clear()
    return True
Logs out the current session by removing it from the cache. This is expected to only occur when a session has been authenticated.
entailment
def callback_maker(
        cache=None, domain='', client_id='',
        client_secret='', redirect_uri=''):
    """
    This function will generate a view function that can be used to handle
    the return from Auth0. The "callback" is a redirected session from auth0
    that includes the token we can use to authenticate that session.
    If the session is properly authenticated Auth0 will provide a code so
    our application can identify the session. Once this has been done we
    ask for more information about the identified session from Auth0. We
    then use the email of the user logged in to Auth0 to authorize their
    token to make further requests by adding it to the application's cache.
    It sets a value in the cache that sets the current session as logged
    in. We can then refer to this id_token to later authenticate a session.
    :param domain:
    :param client_id:
    :param client_secret:
    :param redirect_uri:
    :return : View function
    """
    def callback_handling():
        code = flask.request.args.get('code')
        if code is None:
            raise exceptions.NotAuthorizedException(
                'The callback expects a well '
                'formatted code, {} was provided'.format(code))
        json_header = {'content-type': 'application/json'}
        # Get auth token
        token_url = "https://{domain}/oauth/token".format(domain=domain)
        token_payload = {
            'client_id': client_id,
            'client_secret': client_secret,
            'redirect_uri': redirect_uri,
            'code': code,
            'grant_type': 'authorization_code'}
        try:
            token_info = requests.post(
                token_url,
                data=json.dumps(token_payload),
                headers=json_header).json()
            id_token = token_info['id_token']
            access_token = token_info['access_token']
        except Exception as e:
            raise exceptions.NotAuthorizedException(
                'The callback from Auth0 did not '
                'include the expected tokens: \n'
                '{}'.format(e.message))
        # Get profile information
        try:
            user_url = \
                "https://{domain}/userinfo?access_token={access_token}".format(
                    domain=domain, access_token=access_token)
            user_info = requests.get(user_url).json()
            email = user_info['email']
        except Exception as e:
            raise exceptions.NotAuthorizedException(
                'The user profile from Auth0 did '
                'not contain the expected data: \n {}'.format(e.message))
        # Log token in
        user = cache.get(email)
        if user and user['authorized']:
            cache.set(id_token, user_info)
            return flask.redirect('/login?code={}'.format(id_token))
        else:
            return flask.redirect('/login')
    return callback_handling
This function will generate a view function that can be used to handle the return from Auth0. The "callback" is a redirected session from auth0 that includes the token we can use to authenticate that session. If the session is properly authenticated Auth0 will provide a code so our application can identify the session. Once this has been done we ask for more information about the identified session from Auth0. We then use the email of the user logged in to Auth0 to authorize their token to make further requests by adding it to the application's cache. It sets a value in the cache that sets the current session as logged in. We can then refer to this id_token to later authenticate a session. :param domain: :param client_id: :param client_secret: :param redirect_uri: :return : View function
entailment
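Since callback_maker returns a view closure, it is presumably registered on the Flask URL map at startup; the route, domain, and redirect URI below are placeholders:

# Assumed registration of the Auth0 callback view; the route, domain,
# and redirect URI are placeholders, not values from the original source.
app.add_url_rule(
    '/callback',
    view_func=callback_maker(
        cache=app.cache,
        domain='example.auth0.com',
        client_id=app.config.get('AUTH0_CLIENT_ID'),
        client_secret=app.config.get('AUTH0_CLIENT_SECRET'),
        redirect_uri='https://localhost:8000/callback'))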
def render_login(
        app=None, scopes='', redirect_uri='', domain='', client_id=''):
    """
    Renders the login page template with the given Auth0 domain, client id,
    scopes, and redirect URI so the client can begin the authentication
    flow that is completed by the callback view.
    :param app:
    :param scopes:
    :param redirect_uri:
    :param domain:
    :param client_id:
    :return : Rendered login template
    """
    return app.jinja_env.from_string(LOGIN_HTML).render(
        scopes=scopes,
        redirect_uri=redirect_uri,
        domain=domain,
        client_id=client_id)
Renders the login page template with the given Auth0 domain, client id, scopes, and redirect URI so the client can begin the authentication flow that is completed by the callback view. :param app: :param scopes: :param redirect_uri: :param domain: :param client_id: :return : Rendered login template
entailment
def render_key(app, key=""): """ Renders a view from the app and a key that lets the current session grab its token. :param app: :param key: :return: Rendered view """ return app.jinja_env.from_string(KEY_HTML).render( key=key)
Renders a view from the app and a key that lets the current session grab its token. :param app: :param key: :return: Rendered view
entailment
def _decode_header(auth_header, client_id, client_secret): """ Takes the header and tries to return an active token and decoded payload. :param auth_header: :param client_id: :param client_secret: :return: (token, profile) """ try: token = auth_header.split()[1] payload = jwt.decode( token, client_secret, audience=client_id) except jwt.ExpiredSignature: raise exceptions.NotAuthorizedException( 'Token has expired, please log in again.') # is valid client except jwt.InvalidAudienceError: message = 'Incorrect audience, expected: {}'.format( client_id) raise exceptions.NotAuthorizedException(message) # is valid token except jwt.DecodeError: raise exceptions.NotAuthorizedException( 'Token signature could not be validated.') except Exception as e: raise exceptions.NotAuthorizedException( 'Token signature was malformed. {}'.format(e.message)) return token, payload
Takes the header and tries to return an active token and decoded payload. :param auth_header: :param client_id: :param client_secret: :return: (token, profile)
entailment
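The verification above rests on a single PyJWT call that checks both the HMAC signature and the aud claim. A standalone sketch, assuming the pre-2.0 PyJWT API this code targets; the secret, audience, and claims are made up:

# Standalone sketch of the check performed by _decode_header, using
# made-up values and the pre-2.0 PyJWT API (HS256 by default).
import jwt

secret = 'client-secret'
token = jwt.encode({'aud': 'client-id', 'email': 'user@example.org'},
                   secret)
payload = jwt.decode(token, secret, audience='client-id')
print payload['email']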
def is_active(cache, token):
    """
    Accepts the cache and ID token and checks to see if the profile is
    currently logged in. If so, return the profile, otherwise throw a
    NotAuthenticatedException.
    :param cache:
    :param token:
    :return:
    """
    profile = cache.get(token)
    if not profile:
        raise exceptions.NotAuthenticatedException(
            'The token is good, but you are not logged in. Please '
            'try logging in again.')
    return profile
Accepts the cache and ID token and checks to see if the profile is currently logged in. If so, return the profile, otherwise throw a NotAuthenticatedException. :param cache: :param token: :return:
entailment
def addReference(self, reference): """ Adds the specified reference to this ReferenceSet. """ id_ = reference.getId() self._referenceIdMap[id_] = reference self._referenceNameMap[reference.getLocalId()] = reference self._referenceIds.append(id_)
Adds the specified reference to this ReferenceSet.
entailment