Columns: sentence1 (string, 52 – 3.87M chars) · sentence2 (string, 1 – 47.2k chars) · label (1 class: entailment)
def _add_to_ref(self, rec_curr, line, lnum):
    """Add new fields to the current reference."""
    # Written by DV Klopfenstein
    # Examples of record lines containing ':' include:
    #   id: GO:0000002
    #   name: mitochondrial genome maintenance
    #   namespace: biological_process
    #   def: "The maintenance of ...
    #   is_a: GO:0007005 ! mitochondrion organization
    mtch = re.match(r'^(\S+):\s*(\S.*)$', line)
    if mtch:
        field_name = mtch.group(1)
        field_value = mtch.group(2)
        if field_name == "id":
            self._chk_none(rec_curr.id, lnum)
            rec_curr.id = field_value
        elif field_name == "alt_id":
            rec_curr.alt_ids.append(field_value)
        elif field_name == "name":
            self._chk_none(rec_curr.name, lnum)
            rec_curr.name = field_value
        elif field_name == "namespace":
            self._chk_none(rec_curr.namespace, lnum)
            rec_curr.namespace = field_value
        elif field_name == "is_a":
            rec_curr._parents.append(field_value.split()[0])
        elif field_name == "is_obsolete" and field_value == "true":
            rec_curr.is_obsolete = True
        elif field_name in self.optional_attrs:
            self.update_rec(rec_curr, field_name, field_value)
        else:
            self._die("UNEXPECTED FIELD CONTENT: {L}\n".format(L=line), lnum)
Add new fields to the current reference.
entailment
def update_rec(self, rec, name, value):
    """Update current GOTerm with optional record."""
    # 'def' is a reserved word in python, do not use it as a Class attr.
    if name == "def":
        name = "defn"
    # If we have a relationship, then we will split this into a further
    # dictionary.
    if hasattr(rec, name):
        if name not in self.attrs_scalar:
            if name not in self.attrs_nested:
                getattr(rec, name).add(value)
            else:
                self._add_nested(rec, name, value)
        else:
            raise Exception("ATTR({NAME}) ALREADY SET({VAL})".format(
                NAME=name, VAL=getattr(rec, name)))
    else:  # Initialize new GOTerm attr
        if name in self.attrs_scalar:
            setattr(rec, name, value)
        elif name not in self.attrs_nested:
            setattr(rec, name, set([value]))
        else:
            name = '_{:s}'.format(name)
            setattr(rec, name, defaultdict(list))
            self._add_nested(rec, name, value)
Update current GOTerm with optional record.
entailment
def _add_to_typedef(self, typedef_curr, line, lnum):
    """Add new fields to the current typedef."""
    mtch = re.match(r'^(\S+):\s*(\S.*)$', line)
    if mtch:
        field_name = mtch.group(1)
        field_value = mtch.group(2).split('!')[0].rstrip()
        if field_name == "id":
            self._chk_none(typedef_curr.id, lnum)
            typedef_curr.id = field_value
        elif field_name == "name":
            self._chk_none(typedef_curr.name, lnum)
            typedef_curr.name = field_value
        elif field_name == "transitive_over":
            typedef_curr.transitive_over.append(field_value)
        elif field_name == "inverse_of":
            self._chk_none(typedef_curr.inverse_of, lnum)
            typedef_curr.inverse_of = field_value
        # Note: there are other tags that aren't imported here.
        else:
            self._die("UNEXPECTED FIELD CONTENT: {L}\n".format(L=line), lnum)
Add new fields to the current typedef.
entailment
def _add_nested(self, rec, name, value):
    """Adds a term's nested attributes."""
    # Remove comments and split term into typedef / target term.
    (typedef, target_term) = value.split('!')[0].rstrip().split(' ')
    # Save the nested term.
    getattr(rec, name)[typedef].append(target_term)
Adds a term's nested attributes.
entailment
def _init_optional_attrs(self, optional_attrs):
    """Prepare to store data from user-desired optional fields.

    Not loading these optional fields by default saves space and speed.
    But they can be loaded, if the user desires, including:
      comment consider def is_class_level is_metadata_tag is_transitive
      relationship replaced_by subset synonym transitive_over xref
    """
    # Written by DV Klopfenstein
    # Required attributes are always loaded. All others are optionally loaded.
    self.attrs_req = ['id', 'alt_id', 'name', 'namespace', 'is_a',
                      'is_obsolete']
    self.attrs_scalar = ['comment', 'defn', 'is_class_level',
                         'is_metadata_tag', 'is_transitive',
                         'transitive_over']
    self.attrs_nested = frozenset(['relationship'])
    # Allow user to specify either: 'def' or 'defn'
    #   'def' is an obo field name, but 'defn' is a legal Python attr name.
    fnc = lambda aopt: aopt if aopt != "defn" else "def"
    if optional_attrs is None:
        optional_attrs = []
    elif isinstance(optional_attrs, str):
        optional_attrs = [fnc(optional_attrs)] \
            if optional_attrs not in self.attrs_req else []
    elif isinstance(optional_attrs, (list, set)):
        optional_attrs = set([fnc(f) for f in optional_attrs
                              if f not in self.attrs_req])
    else:
        raise Exception("optional_attrs arg MUST BE A str, list, or set.")
    self.optional_attrs = optional_attrs
Prepare to store data from user-desired optional fields. Not loading these optional fields by default saves space and speed. But they can be loaded, if the user desires, including: comment consider def is_class_level is_metadata_tag is_transitive relationship replaced_by subset synonym transitive_over xref
entailment
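For orientation, a hedged usage sketch of the optional-attribute loading described above: the field names suggest goatools' GODag parser, but the class name, import path, and file name below are assumptions, not confirmed by this excerpt.

# Hypothetical usage; 'def' may be requested as 'defn' since 'def' is a
# Python reserved word (see the fnc lambda above).
from goatools.obo_parser import GODag  # assumed import path

dag = GODag("go-basic.obo", optional_attrs=['defn', 'relationship'])
term = dag['GO:0000002']
print(term.defn)  # definition text, stored under the 'defn' attribute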
def _die(self, msg, lnum):
    """Raise an Exception if file read is unexpected."""
    raise Exception("**FATAL {FILE}({LNUM}): {MSG}\n".format(
        FILE=self.obo_file, LNUM=lnum, MSG=msg))
Raise an Exception if file read is unexpected.
entailment
def write_hier_rec(self, gos_printed, out=sys.stdout,
                   len_dash=1, max_depth=None, num_child=None,
                   short_prt=False, include_only=None, go_marks=None,
                   depth=1, dp="-"):
    """Write hierarchy for a GO Term record."""
    # Added by DV Klopfenstein
    GO_id = self.id
    # Shortens hierarchy report by only printing the hierarchy
    # for the sub-set of user-specified GO terms which are connected.
    if include_only is not None and GO_id not in include_only:
        return
    nrp = short_prt and GO_id in gos_printed
    if go_marks is not None:
        out.write('{} '.format('>' if GO_id in go_marks else ' '))
    if len_dash is not None:
        # Default character indicating hierarchy level is '-'.
        # '=' indicates a hierarchical path printed in detail previously.
        letter = '-' if not nrp or not self.children else '='
        dp = ''.join([letter]*depth)
        out.write('{DASHES:{N}} '.format(DASHES=dp, N=len_dash))
    if num_child is not None:
        out.write('{N:>5} '.format(N=len(self.get_all_children())))
    out.write('{GO}\tL-{L:>02}\tD-{D:>02}\t{desc}\n'.format(
        GO=self.id, L=self.level, D=self.depth, desc=self.name))
    # Track GOs previously printed only if needed
    if short_prt:
        gos_printed.add(GO_id)
    # Do not print hierarchy below this term if it has already been printed
    if nrp:
        return
    depth += 1
    if max_depth is not None and depth > max_depth:
        return
    for p in self.children:
        p.write_hier_rec(gos_printed, out, len_dash, max_depth, num_child,
                         short_prt, include_only, go_marks, depth, dp)
Write hierarchy for a GO Term record.
entailment
def write_hier_all(self, out=sys.stdout, len_dash=1, max_depth=None,
                   num_child=None, short_prt=False):
    """Write hierarchy for all GO Terms in obo file."""
    # Print: [biological_process, molecular_function, and cellular_component]
    for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']:
        self.write_hier(go_id, out, len_dash, max_depth, num_child,
                        short_prt, None)
Write hierarchy for all GO Terms in obo file.
entailment
def write_hier(self, GO_id, out=sys.stdout,
               len_dash=1, max_depth=None, num_child=None,
               short_prt=False, include_only=None, go_marks=None):
    """Write hierarchy for a GO Term."""
    gos_printed = set()
    self[GO_id].write_hier_rec(gos_printed, out, len_dash, max_depth,
                               num_child, short_prt, include_only, go_marks)
Write hierarchy for a GO Term.
entailment
def paths_to_top(self, term):
    """Returns all possible paths to the root node.

    Each path includes the term given. The order of the path is
    top -> bottom, i.e. it starts with the root and ends with the
    given term (inclusively).

    Parameters:
    -----------
    - term:
        the id of the GO term where the paths begin (e.g. the
        accession 'GO:0003682')

    Returns:
    --------
    - a list of lists of GO Terms
    """
    # error handling consistent with original authors
    if term not in self:
        print("Term %s not found!" % term, file=sys.stderr)
        return

    def _paths_to_top_recursive(rec):
        if rec.level == 0:
            return [[rec]]
        paths = []
        for parent in rec.parents:
            top_paths = _paths_to_top_recursive(parent)
            for top_path in top_paths:
                top_path.append(rec)
                paths.append(top_path)
        return paths

    go_term = self[term]
    return _paths_to_top_recursive(go_term)
Returns all possible paths to the root node. Each path includes the term given. The order of the path is top -> bottom, i.e. it starts with the root and ends with the given term (inclusively). Parameters: ----------- - term: the id of the GO term where the paths begin (e.g. the accession 'GO:0003682') Returns: -------- - a list of lists of GO Terms
entailment
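A short call sketch for paths_to_top, assuming `dag` is a loaded DAG object exposing this method (as in the earlier sketch); the accession is the docstring's own example.

# Hypothetical call; each returned path runs root -> ... -> the query term.
paths = dag.paths_to_top('GO:0003682')
if paths is not None:
    for path in paths:
        print(" -> ".join(rec.id for rec in path))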
def make_graph_pydot(self, recs, nodecolor, edgecolor, dpi,
                     draw_parents=True, draw_children=True):
    """draw AMIGO style network, lineage containing one query record."""
    import pydot
    G = pydot.Dot(graph_type='digraph', dpi="{}".format(dpi))  # Directed Graph
    edgeset = set()
    usr_ids = [rec.id for rec in recs]
    for rec in recs:
        if draw_parents:
            edgeset.update(rec.get_all_parent_edges())
        if draw_children:
            edgeset.update(rec.get_all_child_edges())

    lw = self._label_wrap
    rec_id_set = set([rec_id for endpts in edgeset for rec_id in endpts])
    nodes = {str(ID): pydot.Node(
        lw(ID).replace("GO:", ""),  # Node name
        shape="box",
        style="rounded, filled",
        # Highlight query terms in plum:
        fillcolor="beige" if ID not in usr_ids else "plum",
        color=nodecolor)
        for ID in rec_id_set}

    # add nodes explicitly via add_node
    for rec_id, node in nodes.items():
        G.add_node(node)

    for src, target in edgeset:
        # default layout in graphviz is top->bottom, so we invert
        # the direction and plot using dir="back"
        G.add_edge(pydot.Edge(nodes[target], nodes[src],
                              shape="normal",
                              color=edgecolor,
                              label="is_a",
                              dir="back"))

    return G
draw AMIGO style network, lineage containing one query record.
entailment
def sqliteRowsToDicts(sqliteRows):
    """
    Unpacks sqlite rows as returned by fetchall
    into an array of simple dicts.

    :param sqliteRows: array of rows returned from fetchall DB call
    :return: array of dicts, keyed by the column names.
    """
    return map(lambda r: dict(zip(r.keys(), r)), sqliteRows)
Unpacks sqlite rows as returned by fetchall into an array of simple dicts. :param sqliteRows: array of rows returned from fetchall DB call :return: array of dicts, keyed by the column names.
entailment
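A self-contained demonstration of the helper. The assumption, implied by the r.keys() call, is that rows come from a connection configured with sqlite3.Row:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row  # rows gain the keys() method the helper needs
conn.execute("CREATE TABLE peer (url TEXT)")
conn.execute("INSERT INTO peer VALUES ('http://1kgenomes.ga4gh.org')")
rows = conn.execute("SELECT * FROM peer").fetchall()
print(list(sqliteRowsToDicts(rows)))  # [{'url': 'http://1kgenomes.ga4gh.org'}]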
def limitsSql(startIndex=0, maxResults=0):
    """
    Construct a SQL LIMIT clause
    """
    if startIndex and maxResults:
        return " LIMIT {}, {}".format(startIndex, maxResults)
    elif startIndex:
        raise Exception("startIndex was provided, but maxResults was not")
    elif maxResults:
        return " LIMIT {}".format(maxResults)
    else:
        return ""
Construct a SQL LIMIT clause
entailment
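The three branches produce, for example:

print(limitsSql())                              # ''
print(limitsSql(maxResults=100))                # ' LIMIT 100'
print(limitsSql(startIndex=50, maxResults=10))  # ' LIMIT 50, 10'
# limitsSql(startIndex=50) raises Exception: maxResults was not provided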
def iterativeFetch(query, batchSize=default_batch_size):
    """
    Returns rows of a sql fetch query on demand
    """
    while True:
        rows = query.fetchmany(batchSize)
        if not rows:
            break
        rowDicts = sqliteRowsToDicts(rows)
        for rowDict in rowDicts:
            yield rowDict
Returns rows of a sql fetch query on demand
entailment
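Continuing the in-memory sqlite example above (default_batch_size is a module constant not shown in this excerpt):

cursor = conn.execute("SELECT * FROM peer")
for rowDict in iterativeFetch(cursor, batchSize=2):
    print(rowDict)  # rows fetched in batches of 2, yielded one dict at a time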
def _parsePageToken(pageToken, numValues):
    """
    Parses the specified pageToken and returns a list of the specified
    number of values. Page tokens are assumed to consist of a fixed
    number of integers separated by colons. If the page token does not
    conform to this specification, raise a BadPageTokenException.
    """
    tokens = pageToken.split(":")
    if len(tokens) != numValues:
        msg = "Invalid number of values in page token"
        raise exceptions.BadPageTokenException(msg)
    try:
        values = map(int, tokens)
    except ValueError:
        msg = "Malformed integers in page token"
        raise exceptions.BadPageTokenException(msg)
    return values
Parses the specified pageToken and returns a list of the specified number of values. Page tokens are assumed to consist of a fixed number of integers separated by colons. If the page token does not conform to this specification, raise a BadPageTokenException.
entailment
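Behavior sketch, under the Python 2 semantics the code assumes (map returns a list, so the ValueError is raised inside the try block):

_parsePageToken("100:5", 2)  # -> [100, 5]
_parsePageToken("100", 2)    # raises BadPageTokenException (wrong count)
_parsePageToken("a:b", 2)    # raises BadPageTokenException (not integers)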
def _parseIntegerArgument(args, key, defaultValue):
    """
    Attempts to parse the specified key in the specified argument
    dictionary into an integer. If the argument cannot be parsed,
    raises a BadRequestIntegerException. If the key is not present,
    return the specified default value.
    """
    ret = defaultValue
    try:
        if key in args:
            try:
                ret = int(args[key])
            except ValueError:
                raise exceptions.BadRequestIntegerException(key, args[key])
    except TypeError:
        raise Exception((key, args))
    return ret
Attempts to parse the specified key in the specified argument dictionary into an integer. If the argument cannot be parsed, raises a BadRequestIntegerException. If the key is not present, return the specified default value.
entailment
def _initialiseIteration(self):
    """
    Starts a new iteration.
    """
    self._searchIterator = self._search(
        self._request.start,
        self._request.end if self._request.end != 0 else None)
    self._currentObject = next(self._searchIterator, None)
    if self._currentObject is not None:
        self._nextObject = next(self._searchIterator, None)
        self._searchAnchor = self._request.start
        self._distanceFromAnchor = 0
        firstObjectStart = self._getStart(self._currentObject)
        if firstObjectStart > self._request.start:
            self._searchAnchor = firstObjectStart
Starts a new iteration.
entailment
def _pickUpIteration(self, searchAnchor, objectsToSkip):
    """
    Picks up iteration from a previously provided page token. There are
    two different phases here:
    1) We are iterating over the initial set of intervals in which start
       is < the search start coordinate.
    2) We are iterating over the remaining intervals in which
       start >= the search start coordinate.
    """
    self._searchAnchor = searchAnchor
    self._distanceFromAnchor = objectsToSkip
    self._searchIterator = self._search(
        searchAnchor,
        self._request.end if self._request.end != 0 else None)
    obj = next(self._searchIterator)
    if searchAnchor == self._request.start:
        # This is the initial set of intervals, we just skip forward
        # objectsToSkip positions
        for _ in range(objectsToSkip):
            obj = next(self._searchIterator)
    else:
        # Now, we are past this initial set of intervals.
        # First, we need to skip forward over the intervals where
        # start < searchAnchor, as we've seen these already.
        while self._getStart(obj) < searchAnchor:
            obj = next(self._searchIterator)
        # Now, we skip over objectsToSkip objects such that
        # start == searchAnchor
        for _ in range(objectsToSkip):
            if self._getStart(obj) != searchAnchor:
                raise exceptions.BadPageTokenException
            obj = next(self._searchIterator)
    self._currentObject = obj
    self._nextObject = next(self._searchIterator, None)
Picks up iteration from a previously provided page token. There are two different phases here: 1) We are iterating over the initial set of intervals in which start is < the search start coordinate. 2) We are iterating over the remaining intervals in which start >= the search start coordinate.
entailment
def next(self):
    """
    Returns the next (object, nextPageToken) pair.
    """
    if self._currentObject is None:
        raise StopIteration()
    nextPageToken = None
    if self._nextObject is not None:
        start = self._getStart(self._nextObject)
        # If start > the search anchor, move the search anchor. Otherwise,
        # increment the distance from the anchor.
        if start > self._searchAnchor:
            self._searchAnchor = start
            self._distanceFromAnchor = 0
        else:
            self._distanceFromAnchor += 1
        nextPageToken = "{}:{}".format(
            self._searchAnchor, self._distanceFromAnchor)
    ret = self._extractProtocolObject(self._currentObject), nextPageToken
    self._currentObject = self._nextObject
    self._nextObject = next(self._searchIterator, None)
    return ret
Returns the next (object, nextPageToken) pair.
entailment
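A worked example of the token scheme shared by next() and _pickUpIteration, with illustrative coordinates:

# Token layout: "<searchAnchor>:<distanceFromAnchor>".
# Suppose three intervals start at 1000, 1000, 1005 and request.start == 1000:
#   next() -> (interval_1, "1000:1")  # resume: skip 1 object at anchor 1000
#   next() -> (interval_2, "1005:0")  # anchor advances to 1005
#   next() -> (interval_3, None)      # last result carries no token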
def filterVariantAnnotation(self, vann):
    """
    Returns true when an annotation should be included.
    """
    # TODO reintroduce feature ID search
    ret = False
    if len(self._effects) != 0 and not vann.transcript_effects:
        return False
    elif len(self._effects) == 0:
        return True
    for teff in vann.transcript_effects:
        if self.filterEffect(teff):
            ret = True
    return ret
Returns true when an annotation should be included.
entailment
def filterEffect(self, teff):
    """
    Returns true when any of the transcript effects
    are present in the request.
    """
    ret = False
    for effect in teff.effects:
        ret = self._matchAnyEffects(effect) or ret
    return ret
Returns true when any of the transcript effects are present in the request.
entailment
def _checkIdEquality(self, requestedEffect, effect):
    """
    Tests whether a requested effect and an effect
    present in an annotation are equal.
    """
    return self._idPresent(requestedEffect) and (
        effect.term_id == requestedEffect.term_id)
Tests whether a requested effect and an effect present in an annotation are equal.
entailment
def ga4ghImportGlue():
    """
    Call this method before importing a ga4gh module in the scripts dir.
    Otherwise, you will be using the installed package instead of
    the development package.
    Assumes a certain directory structure.
    """
    path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.append(path)
Call this method before importing a ga4gh module in the scripts dir. Otherwise, you will be using the installed package instead of the development package. Assumes a certain directory structure.
entailment
def _update(self, dataFile, handle):
    """
    Update the priority of the file handle. The element is first
    removed and then added to the left of the deque.
    """
    self._cache.remove((dataFile, handle))
    self._add(dataFile, handle)
Update the priority of the file handle. The element is first removed and then added to the left of the deque.
entailment
def _removeLru(self):
    """
    Remove the least recently used file handle from the cache.
    The pop method removes an element from the right of the deque.
    Returns the name of the file that has been removed.
    """
    (dataFile, handle) = self._cache.pop()
    handle.close()
    return dataFile
Remove the least recently used file handle from the cache. The pop method removes an element from the right of the deque. Returns the name of the file that has been removed.
entailment
def getFileHandle(self, dataFile, openMethod):
    """
    Returns the handle associated with the filename. If the file is
    already opened, update its priority in the cache and return its
    handle. Otherwise, open the file using openMethod, store it in
    the cache and return the corresponding handle.
    """
    if dataFile in self._memoTable:
        handle = self._memoTable[dataFile]
        self._update(dataFile, handle)
        return handle
    else:
        try:
            handle = openMethod(dataFile)
        except ValueError:
            raise exceptions.FileOpenFailedException(dataFile)
        self._memoTable[dataFile] = handle
        self._add(dataFile, handle)
        if len(self._memoTable) > self._maxCacheSize:
            dataFile = self._removeLru()
            del self._memoTable[dataFile]
        return handle
Returns the handle associated with the filename. If the file is already opened, update its priority in the cache and return its handle. Otherwise, open the file using openMethod, store it in the cache and return the corresponding handle.
entailment
def join(cls, splits):
    """
    Join an array of ids into a compound id string
    """
    segments = []
    for split in splits:
        segments.append('"{}",'.format(split))
    if len(segments) > 0:
        segments[-1] = segments[-1][:-1]
    jsonString = '[{}]'.format(''.join(segments))
    return jsonString
Join an array of ids into a compound id string
entailment
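For example (assuming the enclosing class is the CompoundId referenced elsewhere in this section):

CompoundId.join(["ds1", "vs1", "call7"])  # -> '["ds1","vs1","call7"]'
CompoundId.join([])                       # -> '[]'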
def parse(cls, compoundIdStr):
    """
    Parses the specified compoundId string and returns an instance
    of this CompoundId class.

    :raises: An ObjectWithIdNotFoundException if parsing fails. This is
    because this method is a client-facing method, and if a malformed
    identifier (under our internal rules) is provided, the response should
    be that the identifier does not exist.
    """
    if not isinstance(compoundIdStr, basestring):
        raise exceptions.BadIdentifierException(compoundIdStr)
    try:
        deobfuscated = cls.deobfuscate(compoundIdStr)
    except TypeError:
        # When a string that cannot be converted to base64 is passed
        # as an argument, b64decode raises a TypeError. We must treat
        # this as an ID not found error.
        raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
    try:
        encodedSplits = cls.split(deobfuscated)
        splits = [cls.decode(split) for split in encodedSplits]
    except (UnicodeDecodeError, ValueError):
        # Sometimes base64 decoding succeeds but we're left with
        # unicode gibberish. This is also an ID-not-found error.
        raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
    # pull the differentiator out of the splits before instantiating
    # the class, if the differentiator exists
    fieldsLength = len(cls.fields)
    if cls.differentiator is not None:
        differentiatorIndex = cls.fields.index(cls.differentiatorFieldName)
        if differentiatorIndex < len(splits):
            del splits[differentiatorIndex]
        else:
            raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
        fieldsLength -= 1
    if len(splits) != fieldsLength:
        raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
    return cls(None, *splits)
Parses the specified compoundId string and returns an instance of this CompoundId class. :raises: An ObjectWithIdNotFoundException if parsing fails. This is because this method is a client-facing method, and if a malformed identifier (under our internal rules) is provided, the response should be that the identifier does not exist.
entailment
def obfuscate(cls, idStr):
    """
    Mildly obfuscates the specified ID string in an easily reversible
    fashion. This is not intended for security purposes, but rather to
    dissuade users from depending on our internal ID structures.
    """
    return unicode(base64.urlsafe_b64encode(
        idStr.encode('utf-8')).replace(b'=', b''))
Mildly obfuscates the specified ID string in an easily reversible fashion. This is not intended for security purposes, but rather to dissuade users from depending on our internal ID structures.
entailment
def deobfuscate(cls, data):
    """
    Reverses the obfuscation done by the :meth:`obfuscate` method.
    If an identifier arrives without correct base64 padding this
    function will append it to the end.
    """
    # the str() call is necessary to convert the unicode string
    # to an ascii string since the urlsafe_b64decode method
    # sometimes chokes on unicode strings
    return base64.urlsafe_b64decode(str((
        data + b'A=='[(len(data) - 1) % 4:])))
Reverses the obfuscation done by the :meth:`obfuscate` method. If an identifier arrives without correct base64 padding this function will append it to the end.
entailment
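Round-trip sketch, under the Python 2 semantics the unicode()/str() calls imply; the id string is illustrative:

token = CompoundId.obfuscate('["ds1","vs1"]')  # url-safe base64, '=' stripped
assert CompoundId.deobfuscate(token) == '["ds1","vs1"]'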
def serializeAttributes(self, msg):
    """
    Sets the attributes of a message during serialization.
    """
    attributes = self.getAttributes()
    for key in attributes:
        protocol.setAttribute(
            msg.attributes.attr[key].values, attributes[key])
    return msg
Sets the attributes of a message during serialization.
entailment
def _scanDataFiles(self, dataDir, patterns):
    """
    Scans the specified directory for files with the specified globbing
    pattern and calls self._addDataFile for each. Raises an
    EmptyDirException if no data files are found.
    """
    numDataFiles = 0
    for pattern in patterns:
        scanPath = os.path.join(dataDir, pattern)
        for filename in glob.glob(scanPath):
            self._addDataFile(filename)
            numDataFiles += 1
    if numDataFiles == 0:
        raise exceptions.EmptyDirException(dataDir, patterns)
Scans the specified directory for files with the specified globbing pattern and calls self._addDataFile for each. Raises an EmptyDirException if no data files are found.
entailment
def getInitialPeerList(filePath, logger=None):
    """
    Attempts to get a list of peers from a file specified in configuration.

    This file has one URL per line and can contain newlines and comments:

        # Main ga4gh node
        http://1kgenomes.ga4gh.org
        # Local intranet peer
        https://192.168.1.1

    The server will attempt to add URLs in this file to its registry at
    startup and will log a warning if the file isn't found.
    """
    ret = []
    with open(filePath) as textFile:
        # strip trailing newlines so the URLs can be stored cleanly
        ret = [line.strip() for line in textFile.readlines()]
    if len(ret) == 0:
        if logger:
            logger.warn("Couldn't load the initial "
                        "peer list. Try adding a "
                        "file named 'initial_peers.txt' "
                        "to {}".format(os.getcwd()))
    # Remove lines that contain a hash or are empty.
    return filter(lambda x: x != "" and not x.find("#") != -1, ret)
Attempts to get a list of peers from a file specified in configuration. This file has one URL per line and can contain newlines and comments. # Main ga4gh node http://1kgenomes.ga4gh.org # Local intranet peer https://192.168.1.1 The server will attempt to add URLs in this file to its registry at startup and will log a warning if the file isn't found.
entailment
def insertInitialPeer(dataRepository, url, logger=None):
    """
    Takes the datarepository, a url, and an optional logger and attempts
    to add the peer into the repository.
    """
    insertPeer = dataRepository.insertPeer
    try:
        peer = datamodel.peers.Peer(url)
        insertPeer(peer)
    except exceptions.RepoManagerException as exc:
        if logger:
            logger.debug(
                "Peer already in registry {} {}".format(peer.getUrl(), exc))
    except exceptions.BadUrlException as exc:
        if logger:
            # both the URL and the exception belong inside the format call
            logger.debug("A URL in the initial "
                         "peer list {} was malformed. {}".format(url, exc))
Takes the datarepository, a url, and an optional logger and attempts to add the peer into the repository.
entailment
def isUrl(urlString):
    """
    Attempts to return whether a given URL string is valid by checking
    for the presence of the URL scheme and netloc using the urlparse
    module, and then using a regex.

    From http://stackoverflow.com/questions/7160737/
    """
    parsed = urlparse.urlparse(urlString)
    urlparseValid = parsed.netloc != '' and parsed.scheme != ''
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
        r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return regex.match(urlString) and urlparseValid
Attempts to return whether a given URL string is valid by checking for the presence of the URL scheme and netloc using the urlparse module, and then using a regex. From http://stackoverflow.com/questions/7160737/
entailment
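Quick checks (bool() added only to normalize the match-object return value):

print(bool(isUrl("http://1kgenomes.ga4gh.org")))  # True
print(bool(isUrl("https://192.168.1.1")))         # True
print(bool(isUrl("not a url")))                   # False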
def setUrl(self, url):
    """
    Attempt to safely set the URL by string.
    """
    if isUrl(url):
        self._url = url
    else:
        raise exceptions.BadUrlException(url)
    return self
Attempt to safely set the URL by string.
entailment
def setAttributesJson(self, attributesJson):
    """
    Sets the attributes dictionary from a JSON string.
    """
    try:
        self._attributes = json.loads(attributesJson)
    except ValueError:
        # json.loads raises ValueError on malformed JSON
        raise exceptions.InvalidJsonException(attributesJson)
    return self
Sets the attributes dictionary from a JSON string.
entailment
def populateFromRow(self, peerRecord):
    """
    This method accepts a model record and sets class variables.
    """
    self.setUrl(peerRecord.url) \
        .setAttributesJson(peerRecord.attributes)
    return self
This method accepts a model record and sets class variables.
entailment
def _topLevelObjectGenerator(self, request, numObjects, getByIndexMethod):
    """
    Returns a generator over the results for the specified request, which
    is over a set of objects of the specified size. The objects are
    returned by call to the specified method, which must take a single
    integer as an argument. The returned generator yields a sequence of
    (object, nextPageToken) pairs, which allows this iteration to be
    picked up at any point.
    """
    currentIndex = 0
    if request.page_token:
        currentIndex, = paging._parsePageToken(request.page_token, 1)
    while currentIndex < numObjects:
        object_ = getByIndexMethod(currentIndex)
        currentIndex += 1
        nextPageToken = None
        if currentIndex < numObjects:
            nextPageToken = str(currentIndex)
        yield object_.toProtocolElement(), nextPageToken
Returns a generator over the results for the specified request, which is over a set of objects of the specified size. The objects are returned by call to the specified method, which must take a single integer as an argument. The returned generator yields a sequence of (object, nextPageToken) pairs, which allows this iteration to be picked up at any point.
entailment
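To make the paging contract concrete, a stand-alone mirror of the generator's logic with hypothetical stand-ins for the request and lookup method:

class FakeRequest(object):
    page_token = ""  # empty token means start from index 0

def pagedGenerator(request, numObjects, getByIndexMethod):
    # Same contract as _topLevelObjectGenerator, minus toProtocolElement().
    currentIndex = int(request.page_token) if request.page_token else 0
    while currentIndex < numObjects:
        obj = getByIndexMethod(currentIndex)
        currentIndex += 1
        nextPageToken = str(currentIndex) if currentIndex < numObjects else None
        yield obj, nextPageToken

print(list(pagedGenerator(FakeRequest(), 3, lambda i: "obj%d" % i)))
# [('obj0', '1'), ('obj1', '2'), ('obj2', None)]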
def _protocolObjectGenerator(self, request, numObjects, getByIndexMethod):
    """
    Returns a generator over the results for the specified request, from
    a set of protocol objects of the specified size. The objects are
    returned by call to the specified method, which must take a single
    integer as an argument. The returned generator yields a sequence of
    (object, nextPageToken) pairs, which allows this iteration to be
    picked up at any point.
    """
    currentIndex = 0
    if request.page_token:
        currentIndex, = paging._parsePageToken(request.page_token, 1)
    while currentIndex < numObjects:
        object_ = getByIndexMethod(currentIndex)
        currentIndex += 1
        nextPageToken = None
        if currentIndex < numObjects:
            nextPageToken = str(currentIndex)
        yield object_, nextPageToken
Returns a generator over the results for the specified request, from a set of protocol objects of the specified size. The objects are returned by call to the specified method, which must take a single integer as an argument. The returned generator yields a sequence of (object, nextPageToken) pairs, which allows this iteration to be picked up at any point.
entailment
def _protocolListGenerator(self, request, objectList):
    """
    Returns a generator over the objects in the specified list using
    _protocolObjectGenerator to generate page tokens.
    """
    return self._protocolObjectGenerator(
        request, len(objectList), lambda index: objectList[index])
Returns a generator over the objects in the specified list using _protocolObjectGenerator to generate page tokens.
entailment
def _objectListGenerator(self, request, objectList):
    """
    Returns a generator over the objects in the specified list using
    _topLevelObjectGenerator to generate page tokens.
    """
    return self._topLevelObjectGenerator(
        request, len(objectList), lambda index: objectList[index])
Returns a generator over the objects in the specified list using _topLevelObjectGenerator to generate page tokens.
entailment
def datasetsGenerator(self, request):
    """
    Returns a generator over the (dataset, nextPageToken) pairs
    defined by the specified request
    """
    return self._topLevelObjectGenerator(
        request, self.getDataRepository().getNumDatasets(),
        self.getDataRepository().getDatasetByIndex)
Returns a generator over the (dataset, nextPageToken) pairs defined by the specified request
entailment
def phenotypeAssociationSetsGenerator(self, request):
    """
    Returns a generator over the (phenotypeAssociationSet, nextPageToken)
    pairs defined by the specified request
    """
    dataset = self.getDataRepository().getDataset(request.dataset_id)
    return self._topLevelObjectGenerator(
        request, dataset.getNumPhenotypeAssociationSets(),
        dataset.getPhenotypeAssociationSetByIndex)
Returns a generator over the (phenotypeAssociationSet, nextPageToken) pairs defined by the specified request
entailment
def readGroupSetsGenerator(self, request):
    """
    Returns a generator over the (readGroupSet, nextPageToken) pairs
    defined by the specified request.
    """
    dataset = self.getDataRepository().getDataset(request.dataset_id)
    return self._readGroupSetsGenerator(
        request, dataset.getNumReadGroupSets(),
        dataset.getReadGroupSetByIndex)
Returns a generator over the (readGroupSet, nextPageToken) pairs defined by the specified request.
entailment
def _readGroupSetsGenerator(self, request, numObjects, getByIndexMethod):
    """
    Returns a generator over the results for the specified request, which
    is over a set of objects of the specified size. The objects are
    returned by call to the specified method, which must take a single
    integer as an argument. The returned generator yields a sequence of
    (object, nextPageToken) pairs, which allows this iteration to be
    picked up at any point.
    """
    currentIndex = 0
    if request.page_token:
        currentIndex, = paging._parsePageToken(request.page_token, 1)
    while currentIndex < numObjects:
        obj = getByIndexMethod(currentIndex)
        include = True
        rgsp = obj.toProtocolElement()
        if request.name and request.name != obj.getLocalId():
            include = False
        if request.biosample_id and include:
            rgsp.ClearField("read_groups")
            for readGroup in obj.getReadGroups():
                if request.biosample_id == readGroup.getBiosampleId():
                    rgsp.read_groups.extend(
                        [readGroup.toProtocolElement()])
            # If none of the biosamples match and the readGroupSet
            # contains readGroups, don't include it in the response
            if len(rgsp.read_groups) == 0 and \
                    len(obj.getReadGroups()) != 0:
                include = False
        currentIndex += 1
        nextPageToken = None
        if currentIndex < numObjects:
            nextPageToken = str(currentIndex)
        if include:
            yield rgsp, nextPageToken
Returns a generator over the results for the specified request, which is over a set of objects of the specified size. The objects are returned by call to the specified method, which must take a single integer as an argument. The returned generator yields a sequence of (object, nextPageToken) pairs, which allows this iteration to be picked up at any point.
entailment
def referenceSetsGenerator(self, request):
    """
    Returns a generator over the (referenceSet, nextPageToken) pairs
    defined by the specified request.
    """
    results = []
    for obj in self.getDataRepository().getReferenceSets():
        include = True
        if request.md5checksum:
            if request.md5checksum != obj.getMd5Checksum():
                include = False
        if request.accession:
            if request.accession not in obj.getSourceAccessions():
                include = False
        if request.assembly_id:
            if request.assembly_id != obj.getAssemblyId():
                include = False
        if include:
            results.append(obj)
    return self._objectListGenerator(request, results)
Returns a generator over the (referenceSet, nextPageToken) pairs defined by the specified request.
entailment
def referencesGenerator(self, request):
    """
    Returns a generator over the (reference, nextPageToken) pairs
    defined by the specified request.
    """
    referenceSet = self.getDataRepository().getReferenceSet(
        request.reference_set_id)
    results = []
    for obj in referenceSet.getReferences():
        include = True
        if request.md5checksum:
            if request.md5checksum != obj.getMd5Checksum():
                include = False
        if request.accession:
            if request.accession not in obj.getSourceAccessions():
                include = False
        if include:
            results.append(obj)
    return self._objectListGenerator(request, results)
Returns a generator over the (reference, nextPageToken) pairs defined by the specified request.
entailment
def variantSetsGenerator(self, request):
    """
    Returns a generator over the (variantSet, nextPageToken) pairs defined
    by the specified request.
    """
    dataset = self.getDataRepository().getDataset(request.dataset_id)
    return self._topLevelObjectGenerator(
        request, dataset.getNumVariantSets(),
        dataset.getVariantSetByIndex)
Returns a generator over the (variantSet, nextPageToken) pairs defined by the specified request.
entailment
def variantAnnotationSetsGenerator(self, request):
    """
    Returns a generator over the (variantAnnotationSet, nextPageToken)
    pairs defined by the specified request.
    """
    compoundId = datamodel.VariantSetCompoundId.parse(
        request.variant_set_id)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(request.variant_set_id)
    return self._topLevelObjectGenerator(
        request, variantSet.getNumVariantAnnotationSets(),
        variantSet.getVariantAnnotationSetByIndex)
Returns a generator over the (variantAnnotationSet, nextPageToken) pairs defined by the specified request.
entailment
def readsGenerator(self, request):
    """
    Returns a generator over the (read, nextPageToken) pairs defined
    by the specified request
    """
    if not request.reference_id:
        raise exceptions.UnmappedReadsNotSupported()
    if len(request.read_group_ids) < 1:
        raise exceptions.BadRequestException(
            "At least one readGroupId must be specified")
    elif len(request.read_group_ids) == 1:
        return self._readsGeneratorSingle(request)
    else:
        return self._readsGeneratorMultiple(request)
Returns a generator over the (read, nextPageToken) pairs defined by the specified request
entailment
def variantsGenerator(self, request):
    """
    Returns a generator over the (variant, nextPageToken) pairs defined
    by the specified request.
    """
    compoundId = datamodel.VariantSetCompoundId \
        .parse(request.variant_set_id)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(compoundId.variant_set_id)
    intervalIterator = paging.VariantsIntervalIterator(
        request, variantSet)
    return intervalIterator
Returns a generator over the (variant, nextPageToken) pairs defined by the specified request.
entailment
def variantAnnotationsGenerator(self, request):
    """
    Returns a generator over the (variantAnnotations, nextPageToken) pairs
    defined by the specified request.
    """
    compoundId = datamodel.VariantAnnotationSetCompoundId.parse(
        request.variant_annotation_set_id)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(compoundId.variant_set_id)
    variantAnnotationSet = variantSet.getVariantAnnotationSet(
        request.variant_annotation_set_id)
    iterator = paging.VariantAnnotationsIntervalIterator(
        request, variantAnnotationSet)
    return iterator
Returns a generator over the (variantAnnotations, nextPageToken) pairs defined by the specified request.
entailment
def featuresGenerator(self, request):
    """
    Returns a generator over the (features, nextPageToken) pairs
    defined by the (JSON string) request.
    """
    compoundId = None
    parentId = None
    if request.feature_set_id != "":
        compoundId = datamodel.FeatureSetCompoundId.parse(
            request.feature_set_id)
    if request.parent_id != "":
        compoundParentId = datamodel.FeatureCompoundId.parse(
            request.parent_id)
        parentId = compoundParentId.featureId
        # A client can optionally specify JUST the (compound) parentID,
        # and the server needs to derive the dataset & featureSet
        # from this (compound) parentID.
        if compoundId is None:
            compoundId = compoundParentId
        else:
            # check that the dataset and featureSet of the parent
            # compound ID is the same as that of the featureSetId
            mismatchCheck = (
                compoundParentId.dataset_id != compoundId.dataset_id or
                compoundParentId.feature_set_id !=
                compoundId.feature_set_id)
            if mismatchCheck:
                raise exceptions.ParentIncompatibleWithFeatureSet()
    if compoundId is None:
        raise exceptions.FeatureSetNotSpecifiedException()
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    featureSet = dataset.getFeatureSet(compoundId.feature_set_id)
    iterator = paging.FeaturesIterator(request, featureSet, parentId)
    return iterator
Returns a generator over the (features, nextPageToken) pairs defined by the (JSON string) request.
entailment
def continuousGenerator(self, request):
    """
    Returns a generator over the (continuous, nextPageToken) pairs
    defined by the (JSON string) request.
    """
    compoundId = None
    if request.continuous_set_id != "":
        compoundId = datamodel.ContinuousSetCompoundId.parse(
            request.continuous_set_id)
    if compoundId is None:
        raise exceptions.ContinuousSetNotSpecifiedException()
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    continuousSet = dataset.getContinuousSet(request.continuous_set_id)
    iterator = paging.ContinuousIterator(request, continuousSet)
    return iterator
Returns a generator over the (continuous, nextPageToken) pairs defined by the (JSON string) request.
entailment
def phenotypesGenerator(self, request):
    """
    Returns a generator over the (phenotypes, nextPageToken) pairs
    defined by the (JSON string) request
    """
    # TODO make paging work using SPARQL?
    compoundId = datamodel.PhenotypeAssociationSetCompoundId.parse(
        request.phenotype_association_set_id)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    phenotypeAssociationSet = dataset.getPhenotypeAssociationSet(
        compoundId.phenotypeAssociationSetId)
    associations = phenotypeAssociationSet.getAssociations(request)
    phenotypes = [association.phenotype for association in associations]
    return self._protocolListGenerator(request, phenotypes)
Returns a generator over the (phenotypes, nextPageToken) pairs defined by the (JSON string) request
entailment
def genotypesPhenotypesGenerator(self, request):
    """
    Returns a generator over the (phenotypes, nextPageToken) pairs
    defined by the (JSON string) request
    """
    # TODO make paging work using SPARQL?
    compoundId = datamodel.PhenotypeAssociationSetCompoundId.parse(
        request.phenotype_association_set_id)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    phenotypeAssociationSet = dataset.getPhenotypeAssociationSet(
        compoundId.phenotypeAssociationSetId)
    featureSets = dataset.getFeatureSets()
    annotationList = phenotypeAssociationSet.getAssociations(
        request, featureSets)
    return self._protocolListGenerator(request, annotationList)
Returns a generator over the (phenotypes, nextPageToken) pairs defined by the (JSON string) request
entailment
def callSetsGenerator(self, request):
    """
    Returns a generator over the (callSet, nextPageToken) pairs defined
    by the specified request.
    """
    compoundId = datamodel.VariantSetCompoundId.parse(
        request.variant_set_id)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(compoundId.variant_set_id)
    results = []
    for obj in variantSet.getCallSets():
        include = True
        if request.name:
            if request.name != obj.getLocalId():
                include = False
        if request.biosample_id:
            if request.biosample_id != obj.getBiosampleId():
                include = False
        if include:
            results.append(obj)
    return self._objectListGenerator(request, results)
Returns a generator over the (callSet, nextPageToken) pairs defined by the specified request.
entailment
def featureSetsGenerator(self, request):
    """
    Returns a generator over the (featureSet, nextPageToken) pairs
    defined by the specified request.
    """
    dataset = self.getDataRepository().getDataset(request.dataset_id)
    return self._topLevelObjectGenerator(
        request, dataset.getNumFeatureSets(),
        dataset.getFeatureSetByIndex)
Returns a generator over the (featureSet, nextPageToken) pairs defined by the specified request.
entailment
def continuousSetsGenerator(self, request):
    """
    Returns a generator over the (continuousSet, nextPageToken) pairs
    defined by the specified request.
    """
    dataset = self.getDataRepository().getDataset(request.dataset_id)
    return self._topLevelObjectGenerator(
        request, dataset.getNumContinuousSets(),
        dataset.getContinuousSetByIndex)
Returns a generator over the (continuousSet, nextPageToken) pairs defined by the specified request.
entailment
def rnaQuantificationSetsGenerator(self, request):
    """
    Returns a generator over the (rnaQuantificationSet, nextPageToken)
    pairs defined by the specified request.
    """
    dataset = self.getDataRepository().getDataset(request.dataset_id)
    return self._topLevelObjectGenerator(
        request, dataset.getNumRnaQuantificationSets(),
        dataset.getRnaQuantificationSetByIndex)
Returns a generator over the (rnaQuantificationSet, nextPageToken) pairs defined by the specified request.
entailment
def rnaQuantificationsGenerator(self, request):
    """
    Returns a generator over the (rnaQuantification, nextPageToken) pairs
    defined by the specified request.
    """
    if len(request.rna_quantification_set_id) < 1:
        raise exceptions.BadRequestException(
            "Rna Quantification Set Id must be specified")
    else:
        compoundId = datamodel.RnaQuantificationSetCompoundId.parse(
            request.rna_quantification_set_id)
        dataset = self.getDataRepository().getDataset(
            compoundId.dataset_id)
        rnaQuantSet = dataset.getRnaQuantificationSet(
            compoundId.rna_quantification_set_id)
    results = []
    for obj in rnaQuantSet.getRnaQuantifications():
        include = True
        if request.biosample_id:
            if request.biosample_id != obj.getBiosampleId():
                include = False
        if include:
            results.append(obj)
    return self._objectListGenerator(request, results)
Returns a generator over the (rnaQuantification, nextPageToken) pairs defined by the specified request.
entailment
def expressionLevelsGenerator(self, request):
    """
    Returns a generator over the (expressionLevel, nextPageToken) pairs
    defined by the specified request.

    Currently only supports searching over a specified rnaQuantification
    """
    rnaQuantificationId = request.rna_quantification_id
    compoundId = datamodel.RnaQuantificationCompoundId.parse(
        request.rna_quantification_id)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    rnaQuantSet = dataset.getRnaQuantificationSet(
        compoundId.rna_quantification_set_id)
    rnaQuant = rnaQuantSet.getRnaQuantification(rnaQuantificationId)
    rnaQuantificationId = rnaQuant.getLocalId()
    iterator = paging.ExpressionLevelsIterator(request, rnaQuant)
    return iterator
Returns a generator over the (expressionLevel, nextPageToken) pairs defined by the specified request. Currently only supports searching over a specified rnaQuantification
entailment
def runGetRequest(self, obj):
    """
    Runs a get request by converting the specified datamodel object
    into its protocol representation.
    """
    protocolElement = obj.toProtocolElement()
    jsonString = protocol.toJson(protocolElement)
    return jsonString
Runs a get request by converting the specified datamodel object into its protocol representation.
entailment
def runSearchRequest(
        self, requestStr, requestClass, responseClass, objectGenerator):
    """
    Runs the specified request. The request is a string containing
    a JSON representation of an instance of the specified requestClass.
    We return a string representation of an instance of the specified
    responseClass in JSON format. Objects are filled into the page list
    using the specified object generator, which must return
    (object, nextPageToken) pairs, and be able to resume iteration from
    any point using the nextPageToken attribute of the request object.
    """
    self.startProfile()
    try:
        request = protocol.fromJson(requestStr, requestClass)
    except protocol.json_format.ParseError:
        raise exceptions.InvalidJsonException(requestStr)
    # TODO How do we detect when the page size is not set?
    if not request.page_size:
        request.page_size = self._defaultPageSize
    if request.page_size < 0:
        raise exceptions.BadPageSizeException(request.page_size)
    responseBuilder = response_builder.SearchResponseBuilder(
        responseClass, request.page_size, self._maxResponseLength)
    nextPageToken = None
    for obj, nextPageToken in objectGenerator(request):
        responseBuilder.addValue(obj)
        if responseBuilder.isFull():
            break
    responseBuilder.setNextPageToken(nextPageToken)
    responseString = responseBuilder.getSerializedResponse()
    self.endProfile()
    return responseString
Runs the specified request. The request is a string containing a JSON representation of an instance of the specified requestClass. We return a string representation of an instance of the specified responseClass in JSON format. Objects are filled into the page list using the specified object generator, which must return (object, nextPageToken) pairs, and be able to resume iteration from any point using the nextPageToken attribute of the request object.
entailment
def runListReferenceBases(self, requestJson):
    """
    Runs a listReferenceBases request for the specified ID and
    request arguments.
    """
    # In the case when an empty post request is made to the endpoint
    # we instantiate an empty ListReferenceBasesRequest.
    if not requestJson:
        request = protocol.ListReferenceBasesRequest()
    else:
        try:
            request = protocol.fromJson(
                requestJson, protocol.ListReferenceBasesRequest)
        except protocol.json_format.ParseError:
            raise exceptions.InvalidJsonException(requestJson)
    compoundId = datamodel.ReferenceCompoundId.parse(request.reference_id)
    referenceSet = self.getDataRepository().getReferenceSet(
        compoundId.reference_set_id)
    reference = referenceSet.getReference(request.reference_id)
    start = request.start
    end = request.end
    if end == 0:  # assume meant "get all"
        end = reference.getLength()
    if request.page_token:
        pageTokenStr = request.page_token
        start = paging._parsePageToken(pageTokenStr, 1)[0]

    chunkSize = self._maxResponseLength
    nextPageToken = None
    if start + chunkSize < end:
        end = start + chunkSize
        nextPageToken = str(start + chunkSize)
    sequence = reference.getBases(start, end)

    # build response
    response = protocol.ListReferenceBasesResponse()
    response.offset = start
    response.sequence = sequence
    if nextPageToken:
        response.next_page_token = nextPageToken
    return protocol.toJson(response)
Runs a listReferenceBases request for the specified ID and request arguments.
entailment
def runGetCallSet(self, id_):
    """
    Returns a callset with the given id
    """
    compoundId = datamodel.CallSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(compoundId.variant_set_id)
    callSet = variantSet.getCallSet(id_)
    return self.runGetRequest(callSet)
Returns a callset with the given id
entailment
def runGetInfo(self, request):
    """
    Returns information about the service including protocol version.
    """
    return protocol.toJson(protocol.GetInfoResponse(
        protocol_version=protocol.version))
Returns information about the service including protocol version.
entailment
def runAddAnnouncement(self, flaskrequest):
    """
    Takes a flask request from the frontend and attempts to parse
    into an AnnouncePeerRequest. If successful, it will log the
    announcement to the `announcement` table with some other metadata
    gathered from the request.
    """
    announcement = {}
    # We want to parse the request ourselves to collect a little more
    # data about it.
    try:
        requestData = protocol.fromJson(
            flaskrequest.get_data(), protocol.AnnouncePeerRequest)
        announcement['hostname'] = flaskrequest.host_url
        announcement['remote_addr'] = flaskrequest.remote_addr
        announcement['user_agent'] = flaskrequest.headers.get('User-Agent')
    except AttributeError:
        # Sometimes in testing we will send protocol requests instead
        # of flask requests and so the hostname and user agent won't
        # be present.
        try:
            requestData = protocol.fromJson(
                flaskrequest, protocol.AnnouncePeerRequest)
        except Exception as e:
            raise exceptions.InvalidJsonException(e)
    except Exception as e:
        raise exceptions.InvalidJsonException(e)

    # Validate the url before accepting the announcement
    peer = datamodel.peers.Peer(requestData.peer.url)
    peer.setAttributesJson(protocol.toJson(requestData.peer.attributes))
    announcement['url'] = peer.getUrl()
    announcement['attributes'] = peer.getAttributes()
    try:
        self.getDataRepository().insertAnnouncement(announcement)
    except Exception:
        raise exceptions.BadRequestException(announcement['url'])
    return protocol.toJson(protocol.AnnouncePeerResponse(success=True))
Takes a flask request from the frontend and attempts to parse into an AnnouncePeerRequest. If successful, it will log the announcement to the `announcement` table with some other metadata gathered from the request.
entailment
def runListPeers(self, request):
    """
    Takes a ListPeersRequest and returns a ListPeersResponse using
    a page_token and page_size if provided.
    """
    return self.runSearchRequest(
        request,
        protocol.ListPeersRequest,
        protocol.ListPeersResponse,
        self.peersGenerator)
Takes a ListPeersRequest and returns a ListPeersResponse using a page_token and page_size if provided.
entailment
def runGetVariant(self, id_):
    """
    Returns a variant with the given id
    """
    compoundId = datamodel.VariantCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(compoundId.variant_set_id)
    gaVariant = variantSet.getVariant(compoundId)
    # TODO variant is a special case here, as it's returning a
    # protocol element rather than a datamodel object. We should
    # fix this for consistency.
    jsonString = protocol.toJson(gaVariant)
    return jsonString
Returns a variant with the given id
entailment
def runGetBiosample(self, id_):
    """
    Runs a getBiosample request for the specified ID.
    """
    compoundId = datamodel.BiosampleCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    biosample = dataset.getBiosample(id_)
    return self.runGetRequest(biosample)
Runs a getBiosample request for the specified ID.
entailment
def runGetIndividual(self, id_):
    """
    Runs a getIndividual request for the specified ID.
    """
    compoundId = datamodel.BiosampleCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    individual = dataset.getIndividual(id_)
    return self.runGetRequest(individual)
Runs a getIndividual request for the specified ID.
entailment
def runGetFeature(self, id_):
    """
    Returns JSON string of the feature object corresponding to
    the feature compoundID passed in.
    """
    compoundId = datamodel.FeatureCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    featureSet = dataset.getFeatureSet(compoundId.feature_set_id)
    gaFeature = featureSet.getFeature(compoundId)
    jsonString = protocol.toJson(gaFeature)
    return jsonString
Returns JSON string of the feature object corresponding to the feature compoundID passed in.
entailment
def runGetReadGroupSet(self, id_):
    """
    Returns a readGroupSet with the given id_
    """
    compoundId = datamodel.ReadGroupSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    readGroupSet = dataset.getReadGroupSet(id_)
    return self.runGetRequest(readGroupSet)
Returns a readGroupSet with the given id_
entailment
def runGetReadGroup(self, id_):
    """
    Returns a read group with the given id_
    """
    compoundId = datamodel.ReadGroupCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    readGroupSet = dataset.getReadGroupSet(compoundId.read_group_set_id)
    readGroup = readGroupSet.getReadGroup(id_)
    return self.runGetRequest(readGroup)
Returns a read group with the given id_
entailment
def runGetReference(self, id_):
    """
    Runs a getReference request for the specified ID.
    """
    compoundId = datamodel.ReferenceCompoundId.parse(id_)
    referenceSet = self.getDataRepository().getReferenceSet(
        compoundId.reference_set_id)
    reference = referenceSet.getReference(id_)
    return self.runGetRequest(reference)
Runs a getReference request for the specified ID.
entailment
def runGetReferenceSet(self, id_):
    """
    Runs a getReferenceSet request for the specified ID.
    """
    referenceSet = self.getDataRepository().getReferenceSet(id_)
    return self.runGetRequest(referenceSet)
Runs a getReferenceSet request for the specified ID.
entailment
def runGetVariantSet(self, id_):
    """
    Runs a getVariantSet request for the specified ID.
    """
    compoundId = datamodel.VariantSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(id_)
    return self.runGetRequest(variantSet)
Runs a getVariantSet request for the specified ID.
entailment
def runGetFeatureSet(self, id_):
    """
    Runs a getFeatureSet request for the specified ID.
    """
    compoundId = datamodel.FeatureSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    featureSet = dataset.getFeatureSet(id_)
    return self.runGetRequest(featureSet)
Runs a getFeatureSet request for the specified ID.
entailment
def runGetContinuousSet(self, id_):
    """
    Runs a getContinuousSet request for the specified ID.
    """
    compoundId = datamodel.ContinuousSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    continuousSet = dataset.getContinuousSet(id_)
    return self.runGetRequest(continuousSet)
Runs a getContinuousSet request for the specified ID.
entailment
def runGetDataset(self, id_):
    """
    Runs a getDataset request for the specified ID.
    """
    dataset = self.getDataRepository().getDataset(id_)
    return self.runGetRequest(dataset)
Runs a getDataset request for the specified ID.
entailment
def runGetVariantAnnotationSet(self, id_):
    """
    Runs a getVariantAnnotationSet request for the specified ID.
    """
    compoundId = datamodel.VariantAnnotationSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(compoundId.variant_set_id)
    variantAnnotationSet = variantSet.getVariantAnnotationSet(id_)
    return self.runGetRequest(variantAnnotationSet)
Runs a getVariantAnnotationSet request for the specified ID.
entailment
def runGetRnaQuantification(self, id_):
    """
    Runs a getRnaQuantification request for the specified ID.
    """
    compoundId = datamodel.RnaQuantificationCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    rnaQuantificationSet = dataset.getRnaQuantificationSet(
        compoundId.rna_quantification_set_id)
    rnaQuantification = rnaQuantificationSet.getRnaQuantification(id_)
    return self.runGetRequest(rnaQuantification)
Runs a getRnaQuantification request for the specified ID.
entailment
def runGetRnaQuantificationSet(self, id_):
    """
    Runs a getRnaQuantificationSet request for the specified ID.
    """
    compoundId = datamodel.RnaQuantificationSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    rnaQuantificationSet = dataset.getRnaQuantificationSet(id_)
    return self.runGetRequest(rnaQuantificationSet)
Runs a getRnaQuantificationSet request for the specified ID.
entailment
def runGetExpressionLevel(self, id_):
    """
    Runs a getExpressionLevel request for the specified ID.
    """
    compoundId = datamodel.ExpressionLevelCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    rnaQuantificationSet = dataset.getRnaQuantificationSet(
        compoundId.rna_quantification_set_id)
    rnaQuantification = rnaQuantificationSet.getRnaQuantification(
        compoundId.rna_quantification_id)
    expressionLevel = rnaQuantification.getExpressionLevel(compoundId)
    return self.runGetRequest(expressionLevel)
Runs a getExpressionLevel request for the specified ID.
entailment
def runSearchReadGroupSets(self, request):
    """
    Runs the specified SearchReadGroupSetsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchReadGroupSetsRequest,
        protocol.SearchReadGroupSetsResponse,
        self.readGroupSetsGenerator)
Runs the specified SearchReadGroupSetsRequest.
entailment
def runSearchIndividuals(self, request):
    """
    Runs the specified SearchIndividualsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchIndividualsRequest,
        protocol.SearchIndividualsResponse,
        self.individualsGenerator)
Runs the specified SearchIndividualsRequest.
entailment
def runSearchBiosamples(self, request):
    """
    Runs the specified SearchBiosamplesRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchBiosamplesRequest,
        protocol.SearchBiosamplesResponse,
        self.biosamplesGenerator)
Runs the specified SearchBiosamplesRequest.
entailment
def runSearchReads(self, request):
    """
    Runs the specified SearchReadsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchReadsRequest,
        protocol.SearchReadsResponse,
        self.readsGenerator)
Runs the specified SearchReadsRequest.
entailment
def runSearchReferenceSets(self, request):
    """
    Runs the specified SearchReferenceSetsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchReferenceSetsRequest,
        protocol.SearchReferenceSetsResponse,
        self.referenceSetsGenerator)
Runs the specified SearchReferenceSetsRequest.
entailment
def runSearchReferences(self, request):
    """
    Runs the specified SearchReferencesRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchReferencesRequest,
        protocol.SearchReferencesResponse,
        self.referencesGenerator)
Runs the specified SearchReferencesRequest.
entailment
def runSearchVariantSets(self, request):
    """
    Runs the specified SearchVariantSetsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchVariantSetsRequest,
        protocol.SearchVariantSetsResponse,
        self.variantSetsGenerator)
Runs the specified SearchVariantSetsRequest.
entailment
def runSearchVariantAnnotationSets(self, request):
    """
    Runs the specified SearchVariantAnnotationSetsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchVariantAnnotationSetsRequest,
        protocol.SearchVariantAnnotationSetsResponse,
        self.variantAnnotationSetsGenerator)
Runs the specified SearchVariantAnnotationSetsRequest.
entailment
def runSearchVariants(self, request):
    """
    Runs the specified SearchVariantsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchVariantsRequest,
        protocol.SearchVariantsResponse,
        self.variantsGenerator)
Runs the specified SearchVariantsRequest.
entailment
def runSearchVariantAnnotations(self, request):
    """
    Runs the specified SearchVariantAnnotationsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchVariantAnnotationsRequest,
        protocol.SearchVariantAnnotationsResponse,
        self.variantAnnotationsGenerator)
Runs the specified SearchVariantAnnotationsRequest.
entailment
def runSearchCallSets(self, request):
    """
    Runs the specified SearchCallSetsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchCallSetsRequest,
        protocol.SearchCallSetsResponse,
        self.callSetsGenerator)
Runs the specified SearchCallSetsRequest.
entailment
def runSearchDatasets(self, request):
    """
    Runs the specified SearchDatasetsRequest.
    """
    return self.runSearchRequest(
        request, protocol.SearchDatasetsRequest,
        protocol.SearchDatasetsResponse,
        self.datasetsGenerator)
Runs the specified SearchDatasetsRequest.
entailment
def runSearchFeatureSets(self, request):
    """
    Returns a SearchFeatureSetsResponse for the specified
    SearchFeatureSetsRequest object.
    """
    return self.runSearchRequest(
        request, protocol.SearchFeatureSetsRequest,
        protocol.SearchFeatureSetsResponse,
        self.featureSetsGenerator)
Returns a SearchFeatureSetsResponse for the specified SearchFeatureSetsRequest object.
entailment
def runSearchFeatures(self, request):
    """
    Returns a SearchFeaturesResponse for the specified
    SearchFeaturesRequest object.

    :param request: JSON string representing searchFeaturesRequest
    :return: JSON string representing searchFeatureResponse
    """
    return self.runSearchRequest(
        request, protocol.SearchFeaturesRequest,
        protocol.SearchFeaturesResponse,
        self.featuresGenerator)
Returns a SearchFeaturesResponse for the specified SearchFeaturesRequest object. :param request: JSON string representing searchFeaturesRequest :return: JSON string representing searchFeatureResponse
entailment